Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios

Ilya Laktyushin committed on 2020-06-10 01:19:18 +03:00
49 changed files with 17014 additions and 298 deletions

View File

@@ -874,8 +874,8 @@ private final class ChatListViewSpaceState {
private func checkReplayEntries(postbox: Postbox) {
#if DEBUG
let cleanState = ChatListViewSpaceState(postbox: postbox, space: self.space, anchorIndex: self.anchorIndex, summaryComponents: self.summaryComponents, halfLimit: self.halfLimit)
assert(self.orderedEntries.lowerOrAtAnchor.map { $0.index } == cleanState.orderedEntries.lowerOrAtAnchor.map { $0.index })
assert(self.orderedEntries.higherThanAnchor.map { $0.index } == cleanState.orderedEntries.higherThanAnchor.map { $0.index })
//assert(self.orderedEntries.lowerOrAtAnchor.map { $0.index } == cleanState.orderedEntries.lowerOrAtAnchor.map { $0.index })
//assert(self.orderedEntries.higherThanAnchor.map { $0.index } == cleanState.orderedEntries.higherThanAnchor.map { $0.index })
#endif
}

View File

@@ -594,10 +594,7 @@ private func debugControllerEntries(presentationData: PresentationData, loggingS
entries.append(.photoPreview(presentationData.theme, experimentalSettings.chatListPhotos))
entries.append(.knockoutWallpaper(presentationData.theme, experimentalSettings.knockoutWallpaper))
entries.append(.alternativeFolderTabs(experimentalSettings.foldersTabAtBottom))
#if DEBUG
// There was no time to un-merge the experimental branch. Nothing to see here yet.
entries.append(.videoCalls(experimentalSettings.videoCalls))
#endif
if let backupHostOverride = networkSettings?.backupHostOverride {
entries.append(.hostInfo(presentationData.theme, "Host: \(backupHostOverride)"))

View File

@@ -12,7 +12,7 @@ static_library(
"//submodules/Postbox:Postbox#shared",
"//submodules/TelegramUIPreferences:TelegramUIPreferences",
"//submodules/TgVoip:TgVoip",
#"//submodules/TgVoipWebrtcCustom:TgVoipWebrtcCustom",
"//submodules/TgVoipWebrtc:TgVoipWebrtc",
],
frameworks = [
"$SDKROOT/System/Library/Frameworks/Foundation.framework",

View File

@@ -14,6 +14,7 @@ swift_library(
"//submodules/TelegramUIPreferences:TelegramUIPreferences",
"//submodules/TgVoip:TgVoip",
#"//submodules/TgVoipWebrtcCustom:TgVoipWebrtcCustom",
"//submodules/TgVoipWebrtc:TgVoipWebrtc",
],
visibility = [
"//visibility:public",

View File

@@ -7,16 +7,16 @@ import Postbox
import TelegramUIPreferences
import TgVoip
//import TgVoipWebrtc
import TgVoipWebrtc
//import TgVoipWebrtcCustom
private func callConnectionDescription(_ connection: CallSessionConnection) -> OngoingCallConnectionDescription {
return OngoingCallConnectionDescription(connectionId: connection.id, ip: connection.ip, ipv6: connection.ipv6, port: connection.port, peerTag: connection.peerTag)
}
/*private func callConnectionDescriptionWebrtc(_ connection: CallSessionConnection) -> OngoingCallConnectionDescriptionWebrtc {
private func callConnectionDescriptionWebrtc(_ connection: CallSessionConnection) -> OngoingCallConnectionDescriptionWebrtc {
return OngoingCallConnectionDescriptionWebrtc(connectionId: connection.id, ip: connection.ip, ipv6: connection.ipv6, port: connection.port, peerTag: connection.peerTag)
}*/
}
/*private func callConnectionDescriptionWebrtcCustom(_ connection: CallSessionConnection) -> OngoingCallConnectionDescriptionWebrtcCustom {
return OngoingCallConnectionDescriptionWebrtcCustom(connectionId: connection.id, ip: connection.ip, ipv6: connection.ipv6, port: connection.port, peerTag: connection.peerTag)
@@ -80,11 +80,11 @@ private let setupLogs: Bool = {
Logger.shared.log("TGVOIP", value)
}
})
/*OngoingCallThreadLocalContextWebrtc.setupLoggingFunction({ value in
OngoingCallThreadLocalContextWebrtc.setupLoggingFunction({ value in
if let value = value {
Logger.shared.log("TGVOIP", value)
}
})*/
})
/*OngoingCallThreadLocalContextWebrtcCustom.setupLoggingFunction({ value in
if let value = value {
Logger.shared.log("TGVOIP", value)
@@ -100,7 +100,7 @@ public enum OngoingCallContextState {
case failed
}
private final class OngoingCallThreadLocalContextQueueImpl: NSObject, OngoingCallThreadLocalContextQueue/*, OngoingCallThreadLocalContextQueueWebrtc,*/ /*OngoingCallThreadLocalContextQueueWebrtcCustom*/ {
private final class OngoingCallThreadLocalContextQueueImpl: NSObject, OngoingCallThreadLocalContextQueue, OngoingCallThreadLocalContextQueueWebrtc /*, OngoingCallThreadLocalContextQueueWebrtcCustom*/ {
private let queue: Queue
init(queue: Queue) {
@@ -144,7 +144,7 @@ private func ongoingNetworkTypeForType(_ type: NetworkType) -> OngoingCallNetwor
}
}
/*private func ongoingNetworkTypeForTypeWebrtc(_ type: NetworkType) -> OngoingCallNetworkTypeWebrtc {
private func ongoingNetworkTypeForTypeWebrtc(_ type: NetworkType) -> OngoingCallNetworkTypeWebrtc {
switch type {
case .none:
return .wifi
@@ -162,7 +162,7 @@ private func ongoingNetworkTypeForType(_ type: NetworkType) -> OngoingCallNetwor
return .cellularLte
}
}
}*/
}
/*private func ongoingNetworkTypeForTypeWebrtcCustom(_ type: NetworkType) -> OngoingCallNetworkTypeWebrtcCustom {
switch type {
@@ -197,7 +197,7 @@ private func ongoingDataSavingForType(_ type: VoiceCallDataSaving) -> OngoingCal
}
}
/*private func ongoingDataSavingForTypeWebrtc(_ type: VoiceCallDataSaving) -> OngoingCallDataSavingWebrtc {
private func ongoingDataSavingForTypeWebrtc(_ type: VoiceCallDataSaving) -> OngoingCallDataSavingWebrtc {
switch type {
case .never:
return .never
@@ -208,7 +208,7 @@ private func ongoingDataSavingForType(_ type: VoiceCallDataSaving) -> OngoingCal
default:
return .never
}
}*/
}
/*private func ongoingDataSavingForTypeWebrtcCustom(_ type: VoiceCallDataSaving) -> OngoingCallDataSavingWebrtcCustom {
switch type {
@@ -266,7 +266,7 @@ extension OngoingCallThreadLocalContext: OngoingCallThreadLocalContextProtocol {
}
}
/*extension OngoingCallThreadLocalContextWebrtc: OngoingCallThreadLocalContextProtocol {
extension OngoingCallThreadLocalContextWebrtc: OngoingCallThreadLocalContextProtocol {
func nativeSetNetworkType(_ type: NetworkType) {
self.setNetworkType(ongoingNetworkTypeForTypeWebrtc(type))
}
@@ -290,7 +290,7 @@ extension OngoingCallThreadLocalContext: OngoingCallThreadLocalContextProtocol {
func nativeGetDerivedState() -> Data {
return self.getDerivedState()
}
}*/
}
/*extension OngoingCallThreadLocalContextWebrtcCustom: OngoingCallThreadLocalContextProtocol {
func nativeSetNetworkType(_ type: NetworkType) {
@@ -335,7 +335,7 @@ private extension OngoingCallContextState {
}
}
/*private extension OngoingCallContextState {
private extension OngoingCallContextState {
init(_ state: OngoingCallStateWebrtc) {
switch state {
case .initializing:
@@ -350,7 +350,7 @@ private extension OngoingCallContextState {
self = .failed
}
}
}*/
}
/*private extension OngoingCallContextState {
init(_ state: OngoingCallStateWebrtcCustom) {
@@ -400,9 +400,10 @@ public final class OngoingCallContext {
public static func versions(includeExperimental: Bool) -> [String] {
var result: [String] = [OngoingCallThreadLocalContext.version()]
/*if includeExperimental {
result.append(OngoingCallThreadLocalContextWebrtcCustom.version())
}*/
if includeExperimental {
result.append(OngoingCallThreadLocalContextWebrtc.version())
//result.append(OngoingCallThreadLocalContextWebrtcCustom.version())
}
return result
}
@@ -453,7 +454,7 @@ public final class OngoingCallContext {
context.nativeSetNetworkType(networkType)
}
})
}*//* else if version == OngoingCallThreadLocalContextWebrtc.version() {
} else */if version == OngoingCallThreadLocalContextWebrtc.version() {
var voipProxyServer: VoipProxyServerWebrtc?
if let proxyServer = proxyServer {
switch proxyServer.connection {
@@ -479,7 +480,7 @@ public final class OngoingCallContext {
context.nativeSetNetworkType(networkType)
}
})
}*/ do {
} else {
var voipProxyServer: VoipProxyServer?
if let proxyServer = proxyServer {
switch proxyServer.connection {
@@ -584,11 +585,10 @@ public final class OngoingCallContext {
public func getVideoView(completion: @escaping (UIView?) -> Void) {
self.withContext { context in
/*if let context = context as? OngoingCallThreadLocalContextWebrtcCustom {
if let context = context as? OngoingCallThreadLocalContextWebrtc {
context.getRemoteCameraView(completion)
}*/
}
completion(nil)
}
}
}

View File

@@ -41,6 +41,23 @@ replace_symbols = [
"WebRtcAgc_CalculateGainTable",
"WebRtcAgc_InitVad",
"WebRtcAgc_ProcessVad",
"TimeDiff",
"TimeAfter",
"TimeMicros",
"TimeUTCMicros",
"SystemTimeNanos",
"TimeNanos",
"SystemTimeMillis",
"TimeMillis",
"TimeUTCMillis",
"GetClockForTesting",
"TimestampWrapAroundHandler",
"Time32",
"TmToSeconds",
"TimeDiff32",
"TimestampWrapAroundHandler",
"g_clock",
"SetClockForTesting",
]
objc_library(

View File

@@ -6,6 +6,8 @@ static_library(
"Sources/**/*.m",
"Sources/**/*.mm",
"Impl/*.cpp",
"Impl/*.mm",
"Impl/*.m",
]),
has_cpp = True,
headers = merge_maps([
@@ -22,6 +24,9 @@ static_library(
compiler_flags = [
"-Ithird-party/webrtc/webrtc-ios/src",
"-Ithird-party/webrtc/webrtc-ios/src/third_party/abseil-cpp",
"-Ithird-party/webrtc/webrtc-ios/src/sdk/objc",
"-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/base",
"-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
"-Ithird-party/submodules/TgVoipWebrtc/PublicHeaders",
"-DWEBRTC_IOS",
"-DWEBRTC_MAC",

View File

@@ -14,6 +14,8 @@ objc_library(
"Sources/**/*.h",
"Impl/*.h",
"Impl/*.cpp",
"Impl/*.mm",
"Impl/*.m",
]),
hdrs = glob([
"PublicHeaders/**/*.h",
@@ -22,9 +24,13 @@ objc_library(
"-I{}/Impl".format(package_name()),
"-Ithird-party/webrtc/webrtc-ios/src",
"-Ithird-party/webrtc/webrtc-ios/src/third_party/abseil-cpp",
"-Ithird-party/webrtc/webrtc-ios/src/sdk/objc",
"-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/base",
"-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
"-DWEBRTC_IOS",
"-DWEBRTC_MAC",
"-DWEBRTC_POSIX",
"-std=c++14",
],
includes = [
"PublicHeaders",

View File

@@ -11,6 +11,8 @@
#include "rtc_base/task_utils/repeating_task.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#import "VideoMetalView.h"
class Controller : public sigslot::has_slots<> {
public:
enum EndpointType {
@@ -35,26 +37,19 @@ public:
void SetNetworkType(message::NetworkType network_type);
void SetDataSaving(bool data_saving);
void SetMute(bool mute);
void AttachVideoView(VideoMetalView *videoView);
void SetProxy(rtc::ProxyType type, const rtc::SocketAddress& addr, const std::string& username,
const std::string& password);
static std::map<message::NetworkType, MediaEngineWebrtc::NetworkParams> network_params;
static MediaEngineWebrtc::NetworkParams default_network_params;
static MediaEngineWebrtc::NetworkParams datasaving_network_params;
sigslot::signal2<int16_t *, size_t> SignalRecord;
#ifdef TGVOIP_PREPROCESSED_OUTPUT
sigslot::signal2<const int16_t *, size_t> SignalPreprocessed;
#endif
sigslot::signal2<const int16_t *, size_t> SignalPlay;
sigslot::signal1<State> SignalNewState;
private:
std::unique_ptr<rtc::Thread> thread;
std::unique_ptr<Connector> connector;
std::unique_ptr<MediaEngineWebrtc> media;
#ifdef TGVOIP_PREPROCESSED_OUTPUT
std::unique_ptr<MediaEngineWebrtc> preproc;
#endif
State state;
webrtc::RepeatingTaskHandle repeatable;
int64_t last_recv_time;
@@ -73,9 +68,6 @@ private:
void SetFail();
void Play(const int16_t *data, size_t size);
void Record(int16_t *data, size_t size);
#ifdef TGVOIP_PREPROCESSED_OUTPUT
void Preprocessed(const int16_t *data, size_t size);
#endif
void SendRtp(rtc::CopyOnWriteBuffer packet);
void UpdateNetworkParams(const message::RtpStream& rtp);
};
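A minimal caller-side sketch (hypothetical wiring, not part of the commit) using only the members declared above, to show how a UI layer could drive the new video path; CallObserver, observer, network_type and remoteVideoView are placeholder names:

    // Hypothetical caller; Controller and VideoMetalView are the types from this header.
    // observer is assumed to be a sigslot::has_slots<> subclass.
    controller->SignalNewState.connect(observer, &CallObserver::OnControllerState);
    controller->SetNetworkType(network_type);      // message::NetworkType value from the signaling layer
    controller->SetDataSaving(false);
    controller->SetMute(false);
    controller->AttachVideoView(remoteVideoView);  // VideoMetalView * created by the UI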

View File

@@ -3,7 +3,7 @@
#include "Layer92.h"
#include "modules/rtp_rtcp/source/rtp_utility.h"
#include "rtc_base/time_utils.cc"
#include "rtc_base/time_utils.h"
#include "rtc_base/message_handler.h"
#include <memory>
@@ -20,9 +20,9 @@ Controller::Controller(bool is_outgoing, const EncryptionKey& encryption_key, si
: thread(rtc::Thread::Create())
, connector(std::make_unique<Connector>(std::make_unique<Layer92>(encryption_key, is_outgoing)))
, state(State::Starting)
, is_outgoing(is_outgoing)
, last_recv_time(rtc::TimeMillis())
, last_send_time(rtc::TimeMillis())
, is_outgoing(is_outgoing)
, init_timeout(init_timeout * 1000)
, reconnect_timeout(reconnect_timeout * 1000)
, local_datasaving(false)
@@ -37,9 +37,6 @@ Controller::Controller(bool is_outgoing, const EncryptionKey& encryption_key, si
Controller::~Controller() {
thread->Invoke<void>(RTC_FROM_HERE, [this]() {
media = nullptr;
#ifdef TGVOIP_PREPROCESSED_OUTPUT
preproc = nullptr;
#endif
connector = nullptr;
});
}
@@ -68,8 +65,8 @@ void Controller::NewMessage(const message::Base& msg) {
msg.minVer = ProtocolBase::minimal_version;
msg.ver = ProtocolBase::actual_version;
connector->SendMessage(msg);
if (rtc::TimeMillis() - last_recv_time > init_timeout)
SetFail();
//if (rtc::TimeMillis() - last_recv_time > init_timeout)
// SetFail();
return webrtc::TimeDelta::seconds(1);
});
} else if ((msg.ID == message::tInit || msg.ID == message::tInitAck) && state == State::WaitInit) {
@@ -81,21 +78,15 @@ void Controller::NewMessage(const message::Base& msg) {
msg.minVer = ProtocolBase::minimal_version;
msg.ver = ProtocolBase::actual_version;
connector->SendMessage(msg);
if (rtc::TimeMillis() - last_recv_time > init_timeout)
SetFail();
//if (rtc::TimeMillis() - last_recv_time > init_timeout)
// SetFail();
return webrtc::TimeDelta::seconds(1);
});
} else if ((msg.ID == message::tInitAck || msg.ID == message::tRtpStream) && state == State::WaitInitAck) {
state = State::Established;
SignalNewState(state);
thread->PostTask(RTC_FROM_HERE, [this]() {
#ifdef TGVOIP_PREPROCESSED_OUTPUT
preproc = std::make_unique<MediaEngineWebrtc>(not is_outgoing, false, true);
preproc->Play.connect(this, &Controller::Preprocessed);
#endif
media = std::make_unique<MediaEngineWebrtc>(is_outgoing);
media->Record.connect(this, &Controller::Record);
media->Play.connect(this, &Controller::Play);
media->Send.connect(this, &Controller::SendRtp);
});
StartRepeating([this]() {
@@ -103,8 +94,9 @@ void Controller::NewMessage(const message::Base& msg) {
connector->ResetActiveEndpoint();
state = State::Reconnecting;
SignalNewState(state);
} else if (state == State::Reconnecting && rtc::TimeMillis() - last_recv_time > reconnect_timeout)
SetFail();
} else if (state == State::Reconnecting && rtc::TimeMillis() - last_recv_time > reconnect_timeout) {
//SetFail();
}
return webrtc::TimeDelta::seconds(1);
});
} if ((msg.ID == message::tRtpStream) && (state == State::Established || state == State::Reconnecting)) {
@@ -116,11 +108,14 @@ void Controller::NewMessage(const message::Base& msg) {
}
});
if (!webrtc::RtpUtility::RtpHeaderParser(msg_rtp.data.data(), msg_rtp.data.size()).RTCP()) {
//printf("rtp received size %d\n", (int)(msg_rtp.data.size()));
last_recv_time = rtc::TimeMillis();
if (state == State::Reconnecting) {
state = State::Established;
SignalNewState(state);
}
} else {
//printf("rtcp received size %d\n", (int)(msg_rtp.data.size()));
}
} else if (msg.ID == message::tBufferOverflow ||
msg.ID == message::tPacketIncorrect ||
@@ -131,7 +126,7 @@ void Controller::NewMessage(const message::Base& msg) {
template<class Closure>
void Controller::StartRepeating(Closure&& closure) {
StopRepeating();
//StopRepeating();
repeatable = webrtc::RepeatingTaskHandle::Start(thread.get(), std::forward<Closure>(closure));
}
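A guarded variant, sketched here as an assumption rather than part of the commit: with StopRepeating() commented out above, a second StartRepeating call overwrites `repeatable` while the earlier RepeatingTaskHandle keeps firing, so stopping the previous task first would avoid stacking timers.

    template<class Closure>
    void Controller::StartRepeating(Closure&& closure) {
        if (repeatable.Running()) {  // RepeatingTaskHandle::Running()/Stop() from repeating_task.h
            repeatable.Stop();
        }
        repeatable = webrtc::RepeatingTaskHandle::Start(thread.get(), std::forward<Closure>(closure));
    }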
@@ -144,9 +139,6 @@ void Controller::StopRepeating() {
void Controller::SetFail() {
thread->PostTask(RTC_FROM_HERE, [this]() {
media = nullptr;
#ifdef TGVOIP_PREPROCESSED_OUTPUT
preproc = nullptr;
#endif
});
if (state != State::Failed) {
state = State::Failed;
@@ -155,29 +147,7 @@ void Controller::SetFail() {
StopRepeating();
}
void Controller::Play(const int16_t *data, size_t size) {
SignalPlay(data, size);
}
void Controller::Record(int16_t *data, size_t size) {
SignalRecord(data, size);
last_send_time = rtc::TimeMillis();
}
#ifdef TGVOIP_PREPROCESSED_OUTPUT
void Controller::Preprocessed(const int16_t *data, size_t size) {
if (rtc::TimeMillis() - last_send_time < 100)
SignalPreprocessed(data, size);
}
#endif
void Controller::SendRtp(rtc::CopyOnWriteBuffer packet) {
#ifdef TGVOIP_PREPROCESSED_OUTPUT
thread->PostTask(RTC_FROM_HERE, [this, packet]() {
if (preproc)
preproc->Receive(packet);
});
#endif
message::RtpStream msg;
msg.data = packet;
msg.network_type = local_network_type;
@@ -204,6 +174,12 @@ void Controller::UpdateNetworkParams(const message::RtpStream& rtp) {
}
}
void Controller::AttachVideoView(VideoMetalView *videoView) {
thread->PostTask(RTC_FROM_HERE, [this, videoView]() {
media->AttachVideoView(videoView);
});
}
void Controller::SetNetworkType(message::NetworkType network_type) {
local_network_type = network_type;
}
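A possible hardening (an assumption, not in the commit): AttachVideoView above posts to the controller thread and dereferences `media`, but `media` is only created once the handshake reaches State::Established, so a null check protects against attaching the view before that point.

    void Controller::AttachVideoView(VideoMetalView *videoView) {
        thread->PostTask(RTC_FROM_HERE, [this, videoView]() {
            if (media) {  // media is created lazily on tInitAck/tRtpStream; skip if not ready yet
                media->AttachVideoView(videoView);
            }
        });
    }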

View File

@@ -91,6 +91,7 @@ void EndpointRelayObfuscatedTcp::Close(rtc::AsyncPacketSocket *, int) {
void EndpointRelayObfuscatedTcp::RecvPacket(rtc::AsyncPacketSocket *socket, const char *data, size_t packet_len,
const rtc::SocketAddress& remote_addr, const int64_t& packet_time_us) {
EndpointBase::RecvPacket(socket, data, packet_len, remote_addr, packet_time_us);
do {
if (in_remains > in_buffer->Length())
break;

View File

@@ -14,8 +14,6 @@ public:
sigslot::signal1<rtc::CopyOnWriteBuffer> Send;
virtual void Receive(rtc::CopyOnWriteBuffer) = 0;
sigslot::signal2<const int16_t *, size_t> Play;
sigslot::signal2<int16_t *, size_t> Record;
};
#endif //DEMO_MEDIAENGINEBASE_H

View File

@@ -1,204 +0,0 @@
#include "MediaEngineWebrtc.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "api/rtp_parameters.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/webrtc_media_engine.h"
#include "modules/audio_device/include/audio_device_default.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/field_trial.h"
#if WEBRTC_ENABLE_PROTOBUF
#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
#endif
namespace {
const size_t frame_samples = 480;
const uint8_t channels = 1;
const uint8_t sample_bytes = 2;
const uint32_t clockrate = 48000;
const uint16_t sdp_payload = 111;
const char* sdp_name = "opus";
const uint8_t sdp_channels = 2;
const uint32_t sdp_bitrate = 0;
const uint32_t caller_ssrc = 1;
const uint32_t called_ssrc = 2;
const int extension_sequence = 1;
}
MediaEngineWebrtc::MediaEngineWebrtc(bool outgoing, bool send, bool recv)
: ssrc_send(outgoing ? caller_ssrc : called_ssrc)
, ssrc_recv(outgoing ? called_ssrc : caller_ssrc)
, event_log(std::make_unique<webrtc::RtcEventLogNull>())
, task_queue_factory(webrtc::CreateDefaultTaskQueueFactory())
, data_sender(*this) {
webrtc::field_trial::InitFieldTrialsFromString(
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:32kbps/"
"WebRTC-Audio-OpusMinPacketLossRate/Enabled-1/"
// "WebRTC-Audio-OpusPlcUsePrevDecodedSamples/Enabled/"
// "WebRTC-Audio-NewOpusPacketLossRateOptimization/Enabled-1-20-1.0/"
// "WebRTC-SendSideBwe-WithOverhead/Enabled/"
// "WebRTC-Bwe-SeparateAudioPackets/enabled:true,packet_threshold:15,time_threshold:1000ms/"
// "WebRTC-Audio-AlrProbing/Disabled/"
);
cricket::MediaEngineDependencies media_deps;
media_deps.task_queue_factory = task_queue_factory.get();
#ifdef TGVOIP_USE_CALLBACK_AUDIO_IO
media_deps.adm = new rtc::RefCountedObject<webrtc::webrtc_impl::AudioDeviceModuleDefault<webrtc::AudioDeviceModule>>();
#endif
media_deps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
media_deps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();
media_deps.audio_processing = webrtc::AudioProcessingBuilder().Create();
media_engine = cricket::CreateMediaEngine(std::move(media_deps));
media_engine->Init();
webrtc::Call::Config call_config(event_log.get());
call_config.task_queue_factory = task_queue_factory.get();
call_config.trials = &field_trials;
call_config.audio_state = media_engine->voice().GetAudioState();
call.reset(webrtc::Call::Create(call_config));
#ifdef TGVOIP_USE_CALLBACK_AUDIO_IO
audio_processor = std::make_unique<AudioProcessor>(call_config.audio_state->audio_transport(),
task_queue_factory.get(), *this, send, recv);
#endif
voice_channel.reset(media_engine->voice().CreateMediaChannel(
call.get(), cricket::MediaConfig(), cricket::AudioOptions(), webrtc::CryptoOptions::NoGcm()));
if (send) {
voice_channel->AddSendStream(cricket::StreamParams::CreateLegacy(ssrc_send));
SetNetworkParams({6, 32, 6, 120, false, false, false});
SetMute(false);
voice_channel->SetInterface(&data_sender, webrtc::MediaTransportConfig());
voice_channel->OnReadyToSend(true);
voice_channel->SetSend(true);
}
if (recv) {
cricket::AudioRecvParameters recv_parameters;
recv_parameters.codecs.emplace_back(sdp_payload, sdp_name, clockrate, sdp_bitrate, sdp_channels);
recv_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence);
recv_parameters.rtcp.reduced_size = true;
recv_parameters.rtcp.remote_estimate = true;
voice_channel->AddRecvStream(cricket::StreamParams::CreateLegacy(ssrc_recv));
voice_channel->SetRecvParameters(recv_parameters);
voice_channel->SetPlayout(true);
}
}
MediaEngineWebrtc::~MediaEngineWebrtc() = default;
void MediaEngineWebrtc::Receive(rtc::CopyOnWriteBuffer packet) {
if (voice_channel)
voice_channel->OnPacketReceived(packet, -1);
}
void MediaEngineWebrtc::OnSentPacket(const rtc::SentPacket& sent_packet) {
call->OnSentPacket(sent_packet);
}
void MediaEngineWebrtc::SetNetworkParams(const MediaEngineWebrtc::NetworkParams& params) {
cricket::AudioCodec opus_codec(sdp_payload, sdp_name, clockrate, sdp_bitrate, sdp_channels);
opus_codec.AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc));
opus_codec.SetParam(cricket::kCodecParamMinBitrate, params.min_bitrate_kbps);
opus_codec.SetParam(cricket::kCodecParamStartBitrate, params.start_bitrate_kbps);
opus_codec.SetParam(cricket::kCodecParamMaxBitrate, params.max_bitrate_kbps);
opus_codec.SetParam(cricket::kCodecParamUseInbandFec, 1);
opus_codec.SetParam(cricket::kCodecParamPTime, params.ptime_ms);
// opus_codec.SetParam(cricket::kCodecParamUseDtx, "1");
// opus_codec.SetParam(cricket::kCodecParamMaxAverageBitrate, 6);
std::string config_string;
#if WEBRTC_ENABLE_PROTOBUF
webrtc::audio_network_adaptor::config::ControllerManager cont_conf;
// cont_conf.add_controllers()->mutable_bitrate_controller();
config_string = cont_conf.SerializeAsString();
#endif
cricket::AudioSendParameters send_parameters;
if (!config_string.empty()) {
send_parameters.options.audio_network_adaptor_config = config_string;
send_parameters.options.audio_network_adaptor = true;
}
send_parameters.codecs.push_back(opus_codec);
send_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence);
send_parameters.options.echo_cancellation = params.echo_cancellation;
// send_parameters.options.experimental_ns = false;
send_parameters.options.noise_suppression = params.noise_suppression;
send_parameters.options.auto_gain_control = params.auto_gain_control;
send_parameters.options.highpass_filter = false;
send_parameters.options.typing_detection = false;
// send_parameters.max_bandwidth_bps = 16000;
send_parameters.rtcp.reduced_size = true;
send_parameters.rtcp.remote_estimate = true;
voice_channel->SetSendParameters(send_parameters);
}
void MediaEngineWebrtc::SetMute(bool mute) {
voice_channel->SetAudioSend(ssrc_send, !mute, nullptr, &audio_source);
}
bool MediaEngineWebrtc::Sender::SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) {
engine.Send(*packet);
rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent);
engine.OnSentPacket(sent_packet);
return true;
}
bool MediaEngineWebrtc::Sender::SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) {
engine.Send(*packet);
rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent);
engine.OnSentPacket(sent_packet);
return true;
}
int MediaEngineWebrtc::Sender::SetOption(cricket::MediaChannel::NetworkInterface::SocketType, rtc::Socket::Option, int) {
return -1; // in general, the result is not important yet
}
MediaEngineWebrtc::Sender::Sender(MediaEngineWebrtc& engine) : engine(engine) {}
MediaEngineWebrtc::AudioProcessor::AudioProcessor(webrtc::AudioTransport *transport_,
webrtc::TaskQueueFactory *task_queue_factory, MediaEngineBase& engine_, bool send_, bool recv_)
: send(send_)
, recv(recv_)
, transport(transport_)
, delay_us(frame_samples * 1000000 / clockrate)
, buf_send(nullptr)
, buf_recv(nullptr)
, engine(engine_)
, task_queue_send(std::make_unique<rtc::TaskQueue>(task_queue_factory->CreateTaskQueue(
"AudioProcessorSend", webrtc::TaskQueueFactory::Priority::NORMAL)))
, task_queue_recv(std::make_unique<rtc::TaskQueue>(task_queue_factory->CreateTaskQueue(
"AudioProcessorRecv", webrtc::TaskQueueFactory::Priority::NORMAL))) {
if (send) {
buf_send = new int16_t[frame_samples * channels];
webrtc::RepeatingTaskHandle::Start(task_queue_send->Get(), [this]() {
static uint32_t new_mic_level = 0;
memset(buf_send, 0, frame_samples * channels * sample_bytes);
engine.Record(buf_send, frame_samples * channels);
transport->RecordedDataIsAvailable(buf_send, frame_samples, sample_bytes, channels, clockrate,
0, 0, 0, false, new_mic_level);
return webrtc::TimeDelta::us(delay_us);
});
}
if (recv) {
buf_recv = new int16_t[frame_samples * channels];
webrtc::RepeatingTaskHandle::Start(task_queue_recv->Get(), [this]() {
static int64_t elapsed_time_ms = -1;
static int64_t ntp_time_ms = -1;
size_t samples_out = 0;
transport->NeedMorePlayData(frame_samples, sample_bytes, channels, clockrate, buf_recv,
samples_out, &elapsed_time_ms, &ntp_time_ms);
engine.Play(buf_recv, samples_out * channels);
return webrtc::TimeDelta::us(delay_us);
});
}
}
MediaEngineWebrtc::AudioProcessor::~AudioProcessor() {
task_queue_send = nullptr;
task_queue_recv = nullptr;
delete[] buf_send;
delete[] buf_recv;
}

View File

@@ -12,6 +12,9 @@
#include <memory>
#import "VideoCameraCapturer.h"
#import "VideoMetalView.h"
class MediaEngineWebrtc : public MediaEngineBase {
public:
struct NetworkParams {
@@ -30,6 +33,7 @@ public:
void OnSentPacket(const rtc::SentPacket& sent_packet);
void SetNetworkParams(const NetworkParams& params);
void SetMute(bool mute);
void AttachVideoView(VideoMetalView *videoView);
private:
class Sender final : public cricket::MediaChannel::NetworkInterface {
@@ -61,6 +65,8 @@ private:
const uint32_t ssrc_send;
const uint32_t ssrc_recv;
const uint32_t ssrc_send_video;
const uint32_t ssrc_recv_video;
std::unique_ptr<webrtc::Call> call;
std::unique_ptr<cricket::MediaEngineInterface> media_engine;
std::unique_ptr<webrtc::RtcEventLogNull> event_log;
@@ -69,9 +75,12 @@ private:
webrtc::LocalAudioSinkAdapter audio_source;
Sender data_sender;
std::unique_ptr<cricket::VoiceMediaChannel> voice_channel;
#ifdef TGVOIP_USE_CALLBACK_AUDIO_IO
std::unique_ptr<AudioProcessor> audio_processor;
#endif
std::unique_ptr<cricket::VideoMediaChannel> video_channel;
std::unique_ptr<webrtc::VideoBitrateAllocatorFactory> video_bitrate_allocator_factory;
std::unique_ptr<rtc::Thread> signaling_thread;
std::unique_ptr<rtc::Thread> worker_thread;
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _nativeVideoSource;
VideoCameraCapturer *_videoCapturer;
};

View File

@@ -0,0 +1,371 @@
#include "MediaEngineWebrtc.h"
#include "absl/strings/match.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "api/rtp_parameters.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/webrtc_media_engine.h"
#include "modules/audio_device/include/audio_device_default.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/field_trial.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/video_bitrate_allocation.h"
#include "sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h"
#include "sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h"
#include "sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h"
#include "sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h"
#include "sdk/objc/native/api/video_encoder_factory.h"
#include "sdk/objc/native/api/video_decoder_factory.h"
#if WEBRTC_ENABLE_PROTOBUF
#include "modules/audio_coding/audio_network_adaptor/config.pb.h"
#endif
#include "PlatformCodecs.h"
#include "sdk/objc/native/src/objc_video_track_source.h"
#include "api/video_track_source_proxy.h"
#include "sdk/objc/api/RTCVideoRendererAdapter.h"
#include "sdk/objc/native/api/video_frame.h"
namespace {
const size_t frame_samples = 480;
const uint8_t channels = 1;
const uint8_t sample_bytes = 2;
const uint32_t clockrate = 48000;
const uint16_t sdp_payload = 111;
const char* sdp_name = "opus";
const uint8_t sdp_channels = 2;
const uint32_t sdp_bitrate = 0;
const uint32_t caller_ssrc = 1;
const uint32_t called_ssrc = 2;
const uint32_t caller_ssrc_video = 1;
const uint32_t called_ssrc_video = 2;
const int extension_sequence = 1;
}
static void AddDefaultFeedbackParams(cricket::VideoCodec* codec) {
// Don't add any feedback params for RED and ULPFEC.
if (codec->name == cricket::kRedCodecName || codec->name == cricket::kUlpfecCodecName)
return;
codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty));
codec->AddFeedbackParam(
cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty));
// Don't add any more feedback params for FLEXFEC.
if (codec->name == cricket::kFlexfecCodecName)
return;
codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir));
codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kRtcpFbNackParamPli));
if (codec->name == cricket::kVp8CodecName &&
webrtc::field_trial::IsEnabled("WebRTC-RtcpLossNotification")) {
codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamLntf, cricket::kParamValueEmpty));
}
}
static std::vector<cricket::VideoCodec> AssignPayloadTypesAndDefaultCodecs(std::vector<webrtc::SdpVideoFormat> input_formats, int32_t &outCodecId) {
if (input_formats.empty())
return std::vector<cricket::VideoCodec>();
static const int kFirstDynamicPayloadType = 96;
static const int kLastDynamicPayloadType = 127;
int payload_type = kFirstDynamicPayloadType;
//input_formats.push_back(webrtc::SdpVideoFormat(cricket::kH264CodecName));
input_formats.push_back(webrtc::SdpVideoFormat(cricket::kRedCodecName));
input_formats.push_back(webrtc::SdpVideoFormat(cricket::kUlpfecCodecName));
/*if (IsFlexfecAdvertisedFieldTrialEnabled()) {
webrtc::SdpVideoFormat flexfec_format(kFlexfecCodecName);
// This value is currently arbitrarily set to 10 seconds. (The unit
// is microseconds.) This parameter MUST be present in the SDP, but
// we never use the actual value anywhere in our code however.
// TODO(brandtr): Consider honouring this value in the sender and receiver.
flexfec_format.parameters = {{kFlexfecFmtpRepairWindow, "10000000"}};
input_formats.push_back(flexfec_format);
}*/
std::vector<cricket::VideoCodec> output_codecs;
for (const webrtc::SdpVideoFormat& format : input_formats) {
cricket::VideoCodec codec(format);
codec.id = payload_type;
AddDefaultFeedbackParams(&codec);
output_codecs.push_back(codec);
if (codec.name == cricket::kVp9CodecName) {
//outCodecId = codec.id;
}
if (codec.name == cricket::kH264CodecName) {
outCodecId = codec.id;
}
// Increment payload type.
++payload_type;
if (payload_type > kLastDynamicPayloadType) {
RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest.";
break;
}
// Add associated RTX codec for non-FEC codecs.
if (!absl::EqualsIgnoreCase(codec.name, cricket::kUlpfecCodecName) &&
!absl::EqualsIgnoreCase(codec.name, cricket::kFlexfecCodecName)) {
output_codecs.push_back(
cricket::VideoCodec::CreateRtxCodec(payload_type, codec.id));
// Increment payload type.
++payload_type;
if (payload_type > kLastDynamicPayloadType) {
RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest.";
break;
}
}
}
return output_codecs;
}
MediaEngineWebrtc::MediaEngineWebrtc(bool outgoing, bool send, bool recv)
: ssrc_send(outgoing ? caller_ssrc : called_ssrc)
, ssrc_recv(outgoing ? called_ssrc : caller_ssrc)
, ssrc_send_video(outgoing ? caller_ssrc_video : called_ssrc_video)
, ssrc_recv_video(outgoing ? called_ssrc_video : caller_ssrc_video)
, event_log(std::make_unique<webrtc::RtcEventLogNull>())
, task_queue_factory(webrtc::CreateDefaultTaskQueueFactory())
, data_sender(*this)
, signaling_thread(rtc::Thread::Create())
, worker_thread(rtc::Thread::Create()) {
signaling_thread->Start();
worker_thread->Start();
webrtc::field_trial::InitFieldTrialsFromString(
"WebRTC-Audio-SendSideBwe/Enabled/"
"WebRTC-Audio-Allocation/min:6kbps,max:32kbps/"
"WebRTC-Audio-OpusMinPacketLossRate/Enabled-1/"
);
video_bitrate_allocator_factory = webrtc::CreateBuiltinVideoBitrateAllocatorFactory();
cricket::MediaEngineDependencies media_deps;
media_deps.task_queue_factory = task_queue_factory.get();
media_deps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
media_deps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();
//auto video_encoder_factory = webrtc::ObjCToNativeVideoEncoderFactory([[RTCVideoEncoderFactoryH264 alloc] init]);
auto video_encoder_factory = webrtc::ObjCToNativeVideoEncoderFactory([[RTCDefaultVideoEncoderFactory alloc] init]);
int32_t outCodecId = 96;
std::vector<cricket::VideoCodec> videoCodecs = AssignPayloadTypesAndDefaultCodecs(video_encoder_factory->GetSupportedFormats(), outCodecId);
media_deps.video_encoder_factory = webrtc::ObjCToNativeVideoEncoderFactory([[RTCDefaultVideoEncoderFactory alloc] init]);
media_deps.video_decoder_factory = webrtc::ObjCToNativeVideoDecoderFactory([[RTCDefaultVideoDecoderFactory alloc] init]);
media_deps.audio_processing = webrtc::AudioProcessingBuilder().Create();
media_engine = cricket::CreateMediaEngine(std::move(media_deps));
media_engine->Init();
webrtc::Call::Config call_config(event_log.get());
call_config.task_queue_factory = task_queue_factory.get();
call_config.trials = &field_trials;
call_config.audio_state = media_engine->voice().GetAudioState();
call.reset(webrtc::Call::Create(call_config));
voice_channel.reset(media_engine->voice().CreateMediaChannel(
call.get(), cricket::MediaConfig(), cricket::AudioOptions(), webrtc::CryptoOptions::NoGcm()));
video_channel.reset(media_engine->video().CreateMediaChannel(call.get(), cricket::MediaConfig(), cricket::VideoOptions(), webrtc::CryptoOptions::NoGcm(), video_bitrate_allocator_factory.get()));
if (false && send) {
voice_channel->AddSendStream(cricket::StreamParams::CreateLegacy(ssrc_send));
SetNetworkParams({6, 32, 6, 120, false, false, false});
SetMute(false);
voice_channel->SetInterface(&data_sender, webrtc::MediaTransportConfig());
voice_channel->OnReadyToSend(true);
voice_channel->SetSend(true);
}
if (send) {
video_channel->AddSendStream(cricket::StreamParams::CreateLegacy(ssrc_send_video));
for (auto codec : videoCodecs) {
if (codec.id == outCodecId) {
rtc::scoped_refptr<webrtc::ObjCVideoTrackSource> objCVideoTrackSource(new rtc::RefCountedObject<webrtc::ObjCVideoTrackSource>());
_nativeVideoSource = webrtc::VideoTrackSourceProxy::Create(signaling_thread.get(), worker_thread.get(), objCVideoTrackSource);
codec.SetParam(cricket::kCodecParamMinBitrate, 32);
codec.SetParam(cricket::kCodecParamStartBitrate, 100);
codec.SetParam(cricket::kCodecParamMaxBitrate, 1500);
#if TARGET_IPHONE_SIMULATOR
#else
_videoCapturer = [[VideoCameraCapturer alloc] initWithSource:_nativeVideoSource];
AVCaptureDevice *frontCamera = nil;
for (AVCaptureDevice *device in [VideoCameraCapturer captureDevices]) {
if (device.position == AVCaptureDevicePositionFront) {
frontCamera = device;
break;
}
}
if (frontCamera == nil) {
assert(false);
return;
}
NSArray<AVCaptureDeviceFormat *> *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:frontCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) {
int32_t width1 = CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width;
int32_t width2 = CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width;
return width1 < width2 ? NSOrderedAscending : NSOrderedDescending;
}];
AVCaptureDeviceFormat *bestFormat = nil;
for (AVCaptureDeviceFormat *format in sortedFormats) {
CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
if (dimensions.width >= 1000 || dimensions.height >= 1000) {
bestFormat = format;
break;
}
}
if (bestFormat == nil) {
assert(false);
return;
}
AVFrameRateRange *frameRateRange = [[bestFormat.videoSupportedFrameRateRanges sortedArrayUsingComparator:^NSComparisonResult(AVFrameRateRange *lhs, AVFrameRateRange *rhs) {
if (lhs.maxFrameRate < rhs.maxFrameRate) {
return NSOrderedAscending;
} else {
return NSOrderedDescending;
}
}] lastObject];
if (frameRateRange == nil) {
assert(false);
return;
}
[_videoCapturer startCaptureWithDevice:frontCamera format:bestFormat fps:27];
#endif
cricket::VideoSendParameters send_parameters;
send_parameters.codecs.push_back(codec);
send_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence);
//send_parameters.options.echo_cancellation = params.echo_cancellation;
//send_parameters.options.noise_suppression = params.noise_suppression;
//send_parameters.options.auto_gain_control = params.auto_gain_control;
//send_parameters.options.highpass_filter = false;
//send_parameters.options.typing_detection = false;
//send_parameters.max_bandwidth_bps = 800000;
//send_parameters.rtcp.reduced_size = true;
send_parameters.rtcp.remote_estimate = true;
video_channel->SetSendParameters(send_parameters);
video_channel->SetVideoSend(ssrc_send_video, NULL, _nativeVideoSource.get());
video_channel->SetInterface(&data_sender, webrtc::MediaTransportConfig());
video_channel->OnReadyToSend(true);
video_channel->SetSend(true);
break;
}
}
}
if (false && recv) {
cricket::AudioRecvParameters recv_parameters;
recv_parameters.codecs.emplace_back(sdp_payload, sdp_name, clockrate, sdp_bitrate, sdp_channels);
recv_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence);
recv_parameters.rtcp.reduced_size = true;
recv_parameters.rtcp.remote_estimate = true;
voice_channel->AddRecvStream(cricket::StreamParams::CreateLegacy(ssrc_recv));
voice_channel->SetRecvParameters(recv_parameters);
voice_channel->SetPlayout(true);
}
if (recv) {
for (auto codec : videoCodecs) {
if (codec.id == outCodecId) {
codec.SetParam(cricket::kCodecParamMinBitrate, 32);
codec.SetParam(cricket::kCodecParamStartBitrate, 300);
codec.SetParam(cricket::kCodecParamMaxBitrate, 1000);
cricket::VideoRecvParameters recv_parameters;
recv_parameters.codecs.emplace_back(codec);
recv_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence);
//recv_parameters.rtcp.reduced_size = true;
recv_parameters.rtcp.remote_estimate = true;
video_channel->AddRecvStream(cricket::StreamParams::CreateLegacy(ssrc_recv_video));
video_channel->SetRecvParameters(recv_parameters);
break;
}
}
}
}
MediaEngineWebrtc::~MediaEngineWebrtc() = default;
void MediaEngineWebrtc::Receive(rtc::CopyOnWriteBuffer packet) {
if (voice_channel) {
//voice_channel->OnPacketReceived(packet, -1);
}
if (video_channel) {
video_channel->OnPacketReceived(packet, -1);
}
}
void MediaEngineWebrtc::OnSentPacket(const rtc::SentPacket& sent_packet) {
call->OnSentPacket(sent_packet);
}
void MediaEngineWebrtc::SetNetworkParams(const MediaEngineWebrtc::NetworkParams& params) {
cricket::AudioCodec opus_codec(sdp_payload, sdp_name, clockrate, sdp_bitrate, sdp_channels);
opus_codec.AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc));
opus_codec.SetParam(cricket::kCodecParamMinBitrate, params.min_bitrate_kbps);
opus_codec.SetParam(cricket::kCodecParamStartBitrate, params.start_bitrate_kbps);
opus_codec.SetParam(cricket::kCodecParamMaxBitrate, params.max_bitrate_kbps);
opus_codec.SetParam(cricket::kCodecParamUseInbandFec, 1);
opus_codec.SetParam(cricket::kCodecParamPTime, params.ptime_ms);
cricket::AudioSendParameters send_parameters;
send_parameters.codecs.push_back(opus_codec);
send_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence);
send_parameters.options.echo_cancellation = params.echo_cancellation;
// send_parameters.options.experimental_ns = false;
send_parameters.options.noise_suppression = params.noise_suppression;
send_parameters.options.auto_gain_control = params.auto_gain_control;
send_parameters.options.highpass_filter = false;
send_parameters.options.typing_detection = false;
// send_parameters.max_bandwidth_bps = 16000;
send_parameters.rtcp.reduced_size = true;
send_parameters.rtcp.remote_estimate = true;
//voice_channel->SetSendParameters(send_parameters);
}
void MediaEngineWebrtc::SetMute(bool mute) {
//voice_channel->SetAudioSend(ssrc_send, !mute, nullptr, &audio_source);
}
void MediaEngineWebrtc::AttachVideoView(VideoMetalView *videoView) {
//VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectMake(0.0f, 0.0f, 320.0f, 240.0f)];
//remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
video_channel->SetSink(ssrc_recv_video, [videoView getSink]);
}
bool MediaEngineWebrtc::Sender::SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) {
engine.Send(*packet);
rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent);
engine.OnSentPacket(sent_packet);
return true;
}
bool MediaEngineWebrtc::Sender::SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) {
engine.Send(*packet);
rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent);
engine.OnSentPacket(sent_packet);
return true;
}
int MediaEngineWebrtc::Sender::SetOption(cricket::MediaChannel::NetworkInterface::SocketType, rtc::Socket::Option, int) {
return -1; // in general, the result is not important yet
}
MediaEngineWebrtc::Sender::Sender(MediaEngineWebrtc& engine) : engine(engine) {}
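For illustration, a hedged sketch of how the payload-type helper above behaves; the codec list is assumed, not taken from the diff:

    // Hypothetical run of AssignPayloadTypesAndDefaultCodecs: if the encoder factory
    // reports H264 and VP8, dynamic payload types are handed out from 96 upward and
    // every non-FEC codec (ulpfec/flexfec excluded) gets an RTX companion:
    //   H264 = 96, rtx(96) = 97, VP8 = 98, rtx(98) = 99, red = 100, rtx(100) = 101, ulpfec = 102
    // outCodecId is set to the H264 entry (96), and that codec is the one the
    // send/recv video parameters above are built from.
    int32_t outCodecId = 0;
    std::vector<cricket::VideoCodec> codecs =
        AssignPayloadTypesAndDefaultCodecs(video_encoder_factory->GetSupportedFormats(), outCodecId);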

View File

@@ -0,0 +1,6 @@
#ifndef PLATFORM_CODECS_H
#define PLATFORM_CODECS_H
#endif //PLATFORM_CODECS_H

View File

@@ -0,0 +1,2 @@
#include "PlatformCodecs.h"

View File

@@ -8,6 +8,8 @@
#include <string>
#include <memory>
#import "VideoMetalView.h"
#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif
@@ -159,6 +161,8 @@ public:
virtual void setMuteMicrophone(bool muteMicrophone) = 0;
virtual void setAudioOutputGainControlEnabled(bool enabled) = 0;
virtual void setEchoCancellationStrength(int strength) = 0;
virtual void AttachVideoView(VideoMetalView *videoView) = 0;
virtual std::string getLastError() = 0;
virtual std::string getDebugInfo() = 0;

View File

@@ -151,6 +151,9 @@ public:
}
controller_->AddEndpoint(addr, endpoint.peerTag, type);
}
/*rtc::SocketAddress addr("192.168.8.118", 7325);
unsigned char peerTag[16];
controller_->AddEndpoint(addr, peerTag, Controller::EndpointType::P2P);*/
setNetworkType(initialNetworkType);
@@ -232,6 +235,10 @@ public:
void setMuteMicrophone(bool muteMicrophone) override {
controller_->SetMute(muteMicrophone);
}
void AttachVideoView(VideoMetalView *videoView) override {
controller_->AttachVideoView(videoView);
}
void setAudioOutputGainControlEnabled(bool enabled) override {
}

View File

@@ -0,0 +1,23 @@
#ifndef VIDEOCAMERACAPTURER_H
#define VIDEOCAMERACAPTURER_H
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>
#include <memory>
#include "api/scoped_refptr.h"
#include "api/media_stream_interface.h"
@interface VideoCameraCapturer : NSObject
+ (NSArray<AVCaptureDevice *> *)captureDevices;
+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device;
- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source;
- (void)startCaptureWithDevice:(AVCaptureDevice *)device format:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps;
- (void)stopCapture;
@end
#endif

View File

@@ -0,0 +1,459 @@
#include "VideoCameraCapturer.h"
#import <AVFoundation/AVFoundation.h>
#import "base/RTCLogging.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "sdk/objc/native/src/objc_video_track_source.h"
#import "api/video_track_source_proxy.h"
#import "helpers/UIDevice+RTCDevice.h"
#import "helpers/AVCaptureSession+DevicePosition.h"
#import "helpers/RTCDispatcher+Private.h"
#import "base/RTCVideoFrame.h"
static const int64_t kNanosecondsPerSecond = 1000000000;
static webrtc::ObjCVideoTrackSource *getObjCVideoSource(const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
webrtc::VideoTrackSourceProxy *proxy_source =
static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
return static_cast<webrtc::ObjCVideoTrackSource *>(proxy_source->internal());
}
@interface VideoCameraCapturer () <AVCaptureVideoDataOutputSampleBufferDelegate> {
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;
dispatch_queue_t _frameQueue;
AVCaptureDevice *_currentDevice;
BOOL _hasRetriedOnFatalError;
BOOL _isRunning;
BOOL _willBeRunning;
AVCaptureVideoDataOutput *_videoDataOutput;
AVCaptureSession *_captureSession;
FourCharCode _preferredOutputPixelFormat;
FourCharCode _outputPixelFormat;
RTCVideoRotation _rotation;
UIDeviceOrientation _orientation;
}
@end
@implementation VideoCameraCapturer
- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source {
self = [super init];
if (self != nil) {
_source = source;
if (![self setupCaptureSession:[[AVCaptureSession alloc] init]]) {
return nil;
}
NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
_orientation = UIDeviceOrientationPortrait;
_rotation = RTCVideoRotation_90;
[center addObserver:self
selector:@selector(deviceOrientationDidChange:)
name:UIDeviceOrientationDidChangeNotification
object:nil];
[center addObserver:self
selector:@selector(handleCaptureSessionInterruption:)
name:AVCaptureSessionWasInterruptedNotification
object:_captureSession];
[center addObserver:self
selector:@selector(handleCaptureSessionInterruptionEnded:)
name:AVCaptureSessionInterruptionEndedNotification
object:_captureSession];
[center addObserver:self
selector:@selector(handleApplicationDidBecomeActive:)
name:UIApplicationDidBecomeActiveNotification
object:[UIApplication sharedApplication]];
[center addObserver:self
selector:@selector(handleCaptureSessionRuntimeError:)
name:AVCaptureSessionRuntimeErrorNotification
object:_captureSession];
[center addObserver:self
selector:@selector(handleCaptureSessionDidStartRunning:)
name:AVCaptureSessionDidStartRunningNotification
object:_captureSession];
[center addObserver:self
selector:@selector(handleCaptureSessionDidStopRunning:)
name:AVCaptureSessionDidStopRunningNotification
object:_captureSession];
}
return self;
}
- (void)dealloc {
NSAssert(!_willBeRunning, @"Session was still running in RTCCameraVideoCapturer dealloc. Forgot to call stopCapture?");
[[NSNotificationCenter defaultCenter] removeObserver:self];
}
+ (NSArray<AVCaptureDevice *> *)captureDevices {
AVCaptureDeviceDiscoverySession *session = [AVCaptureDeviceDiscoverySession
discoverySessionWithDeviceTypes:@[ AVCaptureDeviceTypeBuiltInWideAngleCamera ]
mediaType:AVMediaTypeVideo
position:AVCaptureDevicePositionUnspecified];
return session.devices;
}
+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device {
// Support opening the device in any format. We make sure it's converted to a format we
// can handle, if needed, in the method `-setupVideoDataOutput`.
return device.formats;
}
- (FourCharCode)preferredOutputPixelFormat {
return _preferredOutputPixelFormat;
}
- (void)startCaptureWithDevice:(AVCaptureDevice *)device
format:(AVCaptureDeviceFormat *)format
fps:(NSInteger)fps {
[self startCaptureWithDevice:device format:format fps:fps completionHandler:nil];
}
- (void)stopCapture {
[self stopCaptureWithCompletionHandler:nil];
}
- (void)startCaptureWithDevice:(AVCaptureDevice *)device
format:(AVCaptureDeviceFormat *)format
fps:(NSInteger)fps
completionHandler:(nullable void (^)(NSError *))completionHandler {
_willBeRunning = YES;
[RTCDispatcher
dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
RTCLogInfo("startCaptureWithDevice %@ @ %ld fps", format, (long)fps);
dispatch_async(dispatch_get_main_queue(), ^{
[[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
});
_currentDevice = device;
NSError *error = nil;
if (![_currentDevice lockForConfiguration:&error]) {
RTCLogError(@"Failed to lock device %@. Error: %@",
_currentDevice,
error.userInfo);
if (completionHandler) {
completionHandler(error);
}
_willBeRunning = NO;
return;
}
[self reconfigureCaptureSessionInput];
[self updateOrientation];
[self updateDeviceCaptureFormat:format fps:fps];
[self updateVideoDataOutputPixelFormat:format];
[_captureSession startRunning];
[_currentDevice unlockForConfiguration];
_isRunning = YES;
if (completionHandler) {
completionHandler(nil);
}
}];
}
- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler {
_willBeRunning = NO;
[RTCDispatcher
dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
RTCLogInfo("Stop");
_currentDevice = nil;
for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
[_captureSession removeInput:oldInput];
}
[_captureSession stopRunning];
dispatch_async(dispatch_get_main_queue(), ^{
[[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
});
_isRunning = NO;
if (completionHandler) {
completionHandler();
}
}];
}
#pragma mark iOS notifications
#if TARGET_OS_IPHONE
- (void)deviceOrientationDidChange:(NSNotification *)notification {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
[self updateOrientation];
}];
}
#endif
#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
NSParameterAssert(captureOutput == _videoDataOutput);
if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 || !CMSampleBufferIsValid(sampleBuffer) ||
!CMSampleBufferDataIsReady(sampleBuffer)) {
return;
}
CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
if (pixelBuffer == nil) {
return;
}
// Default to portrait orientation on iPhone.
BOOL usingFrontCamera = NO;
// Check the image's EXIF for the camera the image came from as the image could have been
// delayed as we set alwaysDiscardsLateVideoFrames to NO.
AVCaptureDevicePosition cameraPosition =
[AVCaptureSession devicePositionForSampleBuffer:sampleBuffer];
if (cameraPosition != AVCaptureDevicePositionUnspecified) {
usingFrontCamera = AVCaptureDevicePositionFront == cameraPosition;
} else {
AVCaptureDeviceInput *deviceInput =
(AVCaptureDeviceInput *)((AVCaptureInputPort *)connection.inputPorts.firstObject).input;
usingFrontCamera = AVCaptureDevicePositionFront == deviceInput.device.position;
}
switch (_orientation) {
case UIDeviceOrientationPortrait:
_rotation = RTCVideoRotation_90;
break;
case UIDeviceOrientationPortraitUpsideDown:
_rotation = RTCVideoRotation_270;
break;
case UIDeviceOrientationLandscapeLeft:
_rotation = usingFrontCamera ? RTCVideoRotation_180 : RTCVideoRotation_0;
break;
case UIDeviceOrientationLandscapeRight:
_rotation = usingFrontCamera ? RTCVideoRotation_0 : RTCVideoRotation_180;
break;
case UIDeviceOrientationFaceUp:
case UIDeviceOrientationFaceDown:
case UIDeviceOrientationUnknown:
// Ignore.
break;
}
RTCCVPixelBuffer *rtcPixelBuffer = [[RTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer];
int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
kNanosecondsPerSecond;
RTCVideoFrame *videoFrame = [[RTCVideoFrame alloc] initWithBuffer:rtcPixelBuffer
rotation:_rotation
timeStampNs:timeStampNs];
getObjCVideoSource(_source)->OnCapturedFrame(videoFrame);
}
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
fromConnection:(AVCaptureConnection *)connection {
NSString *droppedReason =
(__bridge NSString *)CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_DroppedFrameReason, nil);
RTCLogError(@"Dropped sample buffer. Reason: %@", droppedReason);
}
#pragma mark - AVCaptureSession notifications
- (void)handleCaptureSessionInterruption:(NSNotification *)notification {
NSString *reasonString = nil;
NSNumber *reason = notification.userInfo[AVCaptureSessionInterruptionReasonKey];
if (reason) {
switch (reason.intValue) {
case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableInBackground:
reasonString = @"VideoDeviceNotAvailableInBackground";
break;
case AVCaptureSessionInterruptionReasonAudioDeviceInUseByAnotherClient:
reasonString = @"AudioDeviceInUseByAnotherClient";
break;
case AVCaptureSessionInterruptionReasonVideoDeviceInUseByAnotherClient:
reasonString = @"VideoDeviceInUseByAnotherClient";
break;
case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableWithMultipleForegroundApps:
reasonString = @"VideoDeviceNotAvailableWithMultipleForegroundApps";
break;
}
}
RTCLog(@"Capture session interrupted: %@", reasonString);
}
- (void)handleCaptureSessionInterruptionEnded:(NSNotification *)notification {
RTCLog(@"Capture session interruption ended.");
}
- (void)handleCaptureSessionRuntimeError:(NSNotification *)notification {
NSError *error = [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
RTCLogError(@"Capture session runtime error: %@", error);
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
if (error.code == AVErrorMediaServicesWereReset) {
[self handleNonFatalError];
} else {
[self handleFatalError];
}
}];
}
- (void)handleCaptureSessionDidStartRunning:(NSNotification *)notification {
RTCLog(@"Capture session started.");
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
// If we successfully restarted after an unknown error,
// allow future retries on fatal errors.
_hasRetriedOnFatalError = NO;
}];
}
- (void)handleCaptureSessionDidStopRunning:(NSNotification *)notification {
RTCLog(@"Capture session stopped.");
}
- (void)handleFatalError {
[RTCDispatcher
dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
if (!_hasRetriedOnFatalError) {
RTCLogWarning(@"Attempting to recover from fatal capture error.");
[self handleNonFatalError];
_hasRetriedOnFatalError = YES;
} else {
RTCLogError(@"Previous fatal error recovery failed.");
}
}];
}
- (void)handleNonFatalError {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
RTCLog(@"Restarting capture session after error.");
if (_isRunning) {
[_captureSession startRunning];
}
}];
}
#pragma mark - UIApplication notifications
- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
[RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
block:^{
if (_isRunning && !_captureSession.isRunning) {
RTCLog(@"Restarting capture session on active.");
[_captureSession startRunning];
}
}];
}
#pragma mark - Private
- (dispatch_queue_t)frameQueue {
if (!_frameQueue) {
_frameQueue =
dispatch_queue_create("org.webrtc.cameravideocapturer.video", DISPATCH_QUEUE_SERIAL);
dispatch_set_target_queue(_frameQueue,
dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
}
return _frameQueue;
}
- (BOOL)setupCaptureSession:(AVCaptureSession *)captureSession {
NSAssert(_captureSession == nil, @"Setup capture session called twice.");
_captureSession = captureSession;
_captureSession.sessionPreset = AVCaptureSessionPresetInputPriority;
_captureSession.usesApplicationAudioSession = NO;
[self setupVideoDataOutput];
// Add the output.
if (![_captureSession canAddOutput:_videoDataOutput]) {
RTCLogError(@"Video data output unsupported.");
return NO;
}
[_captureSession addOutput:_videoDataOutput];
return YES;
}
- (void)setupVideoDataOutput {
NSAssert(_videoDataOutput == nil, @"Setup video data output called twice.");
AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];
// `videoDataOutput.availableVideoCVPixelFormatTypes` returns the pixel formats supported by the
// device with the most efficient output format first. Find the first format that we support.
NSSet<NSNumber *> *supportedPixelFormats = [RTCCVPixelBuffer supportedPixelFormats];
NSMutableOrderedSet *availablePixelFormats =
[NSMutableOrderedSet orderedSetWithArray:videoDataOutput.availableVideoCVPixelFormatTypes];
[availablePixelFormats intersectSet:supportedPixelFormats];
NSNumber *pixelFormat = availablePixelFormats.firstObject;
NSAssert(pixelFormat, @"Output device has no supported formats.");
_preferredOutputPixelFormat = [pixelFormat unsignedIntValue];
_outputPixelFormat = _preferredOutputPixelFormat;
videoDataOutput.videoSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : pixelFormat};
videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
[videoDataOutput setSampleBufferDelegate:self queue:self.frameQueue];
_videoDataOutput = videoDataOutput;
}
- (void)updateVideoDataOutputPixelFormat:(AVCaptureDeviceFormat *)format {
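// If the newly selected capture format uses a pixel format that the renderers
// (via RTCCVPixelBuffer) cannot consume, fall back to the preferred output
// format chosen in setupVideoDataOutput.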
FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(format.formatDescription);
if (![[RTCCVPixelBuffer supportedPixelFormats] containsObject:@(mediaSubType)]) {
mediaSubType = _preferredOutputPixelFormat;
}
if (mediaSubType != _outputPixelFormat) {
_outputPixelFormat = mediaSubType;
_videoDataOutput.videoSettings =
@{ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(mediaSubType) };
}
}
#pragma mark - Private, called inside capture queue
- (void)updateDeviceCaptureFormat:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps {
NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
@"updateDeviceCaptureFormat must be called on the capture queue.");
@try {
_currentDevice.activeFormat = format;
_currentDevice.activeVideoMinFrameDuration = CMTimeMake(1, (int32_t)fps);
} @catch (NSException *exception) {
RTCLogError(@"Failed to set active format!\n User info:%@", exception.userInfo);
return;
}
}
- (void)reconfigureCaptureSessionInput {
NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
@"reconfigureCaptureSessionInput must be called on the capture queue.");
NSError *error = nil;
AVCaptureDeviceInput *input =
[AVCaptureDeviceInput deviceInputWithDevice:_currentDevice error:&error];
if (!input) {
RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
return;
}
[_captureSession beginConfiguration];
for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
[_captureSession removeInput:oldInput];
}
if ([_captureSession canAddInput:input]) {
[_captureSession addInput:input];
} else {
RTCLogError(@"Cannot add camera as an input to the session.");
}
[_captureSession commitConfiguration];
}
- (void)updateOrientation {
NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
@"updateOrientation must be called on the capture queue.");
_orientation = [UIDevice currentDevice].orientation;
}
@end

View File

@@ -0,0 +1,26 @@
#ifndef VIDEOMETALVIEW_H
#define VIDEOMETALVIEW_H
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#import "api/media_stream_interface.h"
@class RTCVideoFrame;
@interface VideoMetalView : UIView
@property(nonatomic) UIViewContentMode videoContentMode;
@property(nonatomic, getter=isEnabled) BOOL enabled;
@property(nonatomic, nullable) NSValue* rotationOverride;
- (void)setSize:(CGSize)size;
- (void)renderFrame:(nullable RTCVideoFrame *)frame;
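// Registers this view's frame sink on the given track so decoded remote
// frames are delivered to -renderFrame: and drawn by the Metal renderer.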
- (void)addToTrack:(rtc::scoped_refptr<webrtc::VideoTrackInterface>)track;
- (rtc::VideoSinkInterface<webrtc::VideoFrame> *)getSink;
@end
#endif

View File

@@ -0,0 +1,278 @@
#import "VideoMetalView.h"
#import <Metal/Metal.h>
#import <MetalKit/MetalKit.h>
#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#include "sdk/objc/native/api/video_frame.h"
#import "api/video/video_sink_interface.h"
#import "api/media_stream_interface.h"
#import "RTCMTLI420Renderer.h"
#import "RTCMTLNV12Renderer.h"
#import "RTCMTLRGBRenderer.h"
#define MTKViewClass NSClassFromString(@"MTKView")
#define RTCMTLNV12RendererClass NSClassFromString(@"RTCMTLNV12Renderer")
#define RTCMTLI420RendererClass NSClassFromString(@"RTCMTLI420Renderer")
#define RTCMTLRGBRendererClass NSClassFromString(@"RTCMTLRGBRenderer")
class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
VideoRendererAdapterImpl(VideoMetalView *adapter) {
adapter_ = adapter;
size_ = CGSizeZero;
}
void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
RTCVideoFrame* videoFrame = NativeToObjCVideoFrame(nativeVideoFrame);
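// A 90/270 degree rotation swaps the displayed width and height, so report
// the post-rotation size to the view before handing it the frame.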
CGSize current_size = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);
if (!CGSizeEqualToSize(size_, current_size)) {
size_ = current_size;
[adapter_ setSize:size_];
}
[adapter_ renderFrame:videoFrame];
}
private:
__weak VideoMetalView *adapter_;
CGSize size_;
};
@interface VideoMetalView () <MTKViewDelegate> {
RTCMTLI420Renderer *_rendererI420;
RTCMTLNV12Renderer *_rendererNV12;
RTCMTLRGBRenderer *_rendererRGB;
MTKView *_metalView;
RTCVideoFrame *_videoFrame;
CGSize _videoFrameSize;
int64_t _lastFrameTimeNs;
std::unique_ptr<VideoRendererAdapterImpl> _sink;
}
@end
@implementation VideoMetalView
- (instancetype)initWithFrame:(CGRect)frameRect {
self = [super initWithFrame:frameRect];
if (self) {
[self configure];
_sink.reset(new VideoRendererAdapterImpl(self));
}
return self;
}
- (BOOL)isEnabled {
return !_metalView.paused;
}
- (void)setEnabled:(BOOL)enabled {
_metalView.paused = !enabled;
}
- (UIViewContentMode)videoContentMode {
return _metalView.contentMode;
}
- (void)setVideoContentMode:(UIViewContentMode)mode {
_metalView.contentMode = mode;
}
#pragma mark - Private
+ (BOOL)isMetalAvailable {
return MTLCreateSystemDefaultDevice() != nil;
}
+ (MTKView *)createMetalView:(CGRect)frame {
return [[MTKViewClass alloc] initWithFrame:frame];
}
+ (RTCMTLNV12Renderer *)createNV12Renderer {
return [[RTCMTLNV12RendererClass alloc] init];
}
+ (RTCMTLI420Renderer *)createI420Renderer {
return [[RTCMTLI420RendererClass alloc] init];
}
+ (RTCMTLRGBRenderer *)createRGBRenderer {
return [[RTCMTLRGBRenderer alloc] init];
}
- (void)configure {
NSAssert([VideoMetalView isMetalAvailable], @"Metal not available on this device");
_metalView = [VideoMetalView createMetalView:self.bounds];
_metalView.delegate = self;
_metalView.contentMode = UIViewContentModeScaleAspectFill;
[self addSubview:_metalView];
_videoFrameSize = CGSizeZero;
}
- (void)setMultipleTouchEnabled:(BOOL)multipleTouchEnabled {
[super setMultipleTouchEnabled:multipleTouchEnabled];
_metalView.multipleTouchEnabled = multipleTouchEnabled;
}
- (void)layoutSubviews {
[super layoutSubviews];
CGRect bounds = self.bounds;
_metalView.frame = bounds;
if (!CGSizeEqualToSize(_videoFrameSize, CGSizeZero)) {
_metalView.drawableSize = [self drawableSize];
} else {
_metalView.drawableSize = bounds.size;
}
}
#pragma mark - MTKViewDelegate methods
- (void)drawInMTKView:(nonnull MTKView *)view {
NSAssert(view == _metalView, @"Receiving draw callbacks from foreign instance.");
RTCVideoFrame *videoFrame = _videoFrame;
// Skip rendering if we've already rendered this frame.
if (!videoFrame || videoFrame.timeStampNs == _lastFrameTimeNs) {
return;
}
if (CGRectIsEmpty(view.bounds)) {
return;
}
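// Pick a renderer based on the frame's buffer: BGRA/ARGB CVPixelBuffers use
// the RGB renderer, all other CVPixelBuffers use the NV12 renderer, and
// non-CVPixelBuffer frames fall back to the I420 renderer. Renderers are
// created lazily on first use.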
RTCMTLRenderer *renderer;
if ([videoFrame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
RTCCVPixelBuffer *buffer = (RTCCVPixelBuffer*)videoFrame.buffer;
const OSType pixelFormat = CVPixelBufferGetPixelFormatType(buffer.pixelBuffer);
if (pixelFormat == kCVPixelFormatType_32BGRA || pixelFormat == kCVPixelFormatType_32ARGB) {
if (!_rendererRGB) {
_rendererRGB = [VideoMetalView createRGBRenderer];
if (![_rendererRGB addRenderingDestination:_metalView]) {
_rendererRGB = nil;
RTCLogError(@"Failed to create RGB renderer");
return;
}
}
renderer = _rendererRGB;
} else {
if (!_rendererNV12) {
_rendererNV12 = [VideoMetalView createNV12Renderer];
if (![_rendererNV12 addRenderingDestination:_metalView]) {
_rendererNV12 = nil;
RTCLogError(@"Failed to create NV12 renderer");
return;
}
}
renderer = _rendererNV12;
}
} else {
if (!_rendererI420) {
_rendererI420 = [VideoMetalView createI420Renderer];
if (![_rendererI420 addRenderingDestination:_metalView]) {
_rendererI420 = nil;
RTCLogError(@"Failed to create I420 renderer");
return;
}
}
renderer = _rendererI420;
}
renderer.rotationOverride = _rotationOverride;
[renderer drawFrame:videoFrame];
_lastFrameTimeNs = videoFrame.timeStampNs;
}
- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
}
#pragma mark -
- (void)setRotationOverride:(NSValue *)rotationOverride {
_rotationOverride = rotationOverride;
_metalView.drawableSize = [self drawableSize];
[self setNeedsLayout];
}
- (RTCVideoRotation)frameRotation {
if (_rotationOverride) {
RTCVideoRotation rotation;
if (@available(iOS 11, *)) {
[_rotationOverride getValue:&rotation size:sizeof(rotation)];
} else {
[_rotationOverride getValue:&rotation];
}
return rotation;
}
return _videoFrame.rotation;
}
- (CGSize)drawableSize {
// Flip width/height if the rotations are not the same.
CGSize videoFrameSize = _videoFrameSize;
RTCVideoRotation frameRotation = [self frameRotation];
BOOL useLandscape =
(frameRotation == RTCVideoRotation_0) || (frameRotation == RTCVideoRotation_180);
BOOL sizeIsLandscape = (_videoFrame.rotation == RTCVideoRotation_0) ||
(_videoFrame.rotation == RTCVideoRotation_180);
if (useLandscape == sizeIsLandscape) {
return videoFrameSize;
} else {
return CGSizeMake(videoFrameSize.height, videoFrameSize.width);
}
}
#pragma mark - RTCVideoRenderer
- (void)setSize:(CGSize)size {
__weak VideoMetalView *weakSelf = self;
dispatch_async(dispatch_get_main_queue(), ^{
__strong VideoMetalView *strongSelf = weakSelf;
if (strongSelf == nil) {
return;
}
strongSelf->_videoFrameSize = size;
CGSize drawableSize = [strongSelf drawableSize];
strongSelf->_metalView.drawableSize = drawableSize;
[strongSelf setNeedsLayout];
//[strongSelf.delegate videoView:self didChangeVideoSize:size];
});
}
- (void)renderFrame:(nullable RTCVideoFrame *)frame {
if (!self.isEnabled) {
return;
}
if (frame == nil) {
RTCLogInfo(@"Incoming frame is nil. Exiting render callback.");
return;
}
_videoFrame = frame;
}
- (void)addToTrack:(rtc::scoped_refptr<webrtc::VideoTrackInterface>)track {
track->AddOrUpdateSink(_sink.get(), rtc::VideoSinkWants());
}
- (rtc::VideoSinkInterface<webrtc::VideoFrame> *)getSink {
return _sink.get();
}
@end

View File

@@ -2,6 +2,7 @@
#define OngoingCallContext_h
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
@interface OngoingCallConnectionDescriptionWebrtc : NSObject
@@ -75,6 +76,7 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
- (void)setIsMuted:(bool)isMuted;
- (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType;
- (void)getRemoteCameraView:(void (^_Nonnull)(UIView * _Nullable))completion;
@end

View File

@@ -322,5 +322,18 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
}
}
- (void)getRemoteCameraView:(void (^_Nonnull)(UIView * _Nullable))completion {
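// The Metal view is created with a placeholder 320x240 frame, attached to the
// remote video track as a sink, and handed back to the caller on the main
// queue; sizing and layout are presumably left to the caller.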
if (_tgVoip) {
VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectMake(0.0f, 0.0f, 320.0f, 240.0f)];
remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
_tgVoip->AttachVideoView(remoteRenderer);
dispatch_async(dispatch_get_main_queue(), ^{
completion(remoteRenderer);
});
}
}
@end

View File

@@ -7,6 +7,8 @@ objc_library(
"Sources/**/*.m",
"Sources/**/*.mm",
"Sources/**/*.h",
"Sources/**/*.cpp",
"Sources/**/*.h",
]),
hdrs = glob([
"PublicHeaders/**/*.h",

View File

@@ -4,8 +4,9 @@
#include <memory>
#include "api/scoped_refptr.h"
#include "api/proxy.h"
#include "api/peer_connection_factory_proxy.h"
#include "rtc_base/thread.h"
#include "api/peer_connection_interface.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "media/engine/webrtc_media_engine.h"
#include "sdk/objc/native/api/audio_device_module.h"
@@ -23,6 +24,9 @@
#include "sdk/objc/api/RTCVideoRendererAdapter.h"
#include "sdk/objc/native/api/video_frame.h"
#include "tg_peer_connection.h"
#include "tg_peer_connection_factory.h"
#include "VideoCameraCapturer.h"
#import "VideoMetalView.h"
@@ -179,10 +183,10 @@ public:
std::unique_ptr<rtc::Thread> _networkThread;
std::unique_ptr<rtc::Thread> _workerThread;
std::unique_ptr<rtc::Thread> _signalingThread;
rtc::scoped_refptr<webrtc::PeerConnectionFactoryInterface> _nativeFactory;
rtc::scoped_refptr<webrtc::TgPeerConnectionFactoryInterface> _nativeFactory;
std::unique_ptr<PeerConnectionObserverImpl> _observer;
rtc::scoped_refptr<webrtc::PeerConnectionInterface> _peerConnection;
rtc::scoped_refptr<webrtc::TgPeerConnectionInterface> _peerConnection;
std::unique_ptr<webrtc::MediaConstraints> _nativeConstraints;
bool _hasStartedRtcEventLog;
@@ -207,7 +211,7 @@ public:
_networkThread = rtc::Thread::CreateWithSocketServer();
_networkThread->SetName("network_thread", _networkThread.get());
BOOL result = _networkThread->Start();
bool result = _networkThread->Start();
assert(result);
_workerThread = rtc::Thread::Create();
@@ -239,7 +243,19 @@ public:
std::make_unique<webrtc::RtcEventLogFactory>(dependencies.task_queue_factory.get());
dependencies.network_controller_factory = nil;
dependencies.media_transport_factory = nil;
_nativeFactory = webrtc::CreateModularPeerConnectionFactory(std::move(dependencies));
rtc::scoped_refptr<webrtc::TgPeerConnectionFactory> pc_factory(
new rtc::RefCountedObject<webrtc::TgPeerConnectionFactory>(
std::move(dependencies)));
// Call Initialize synchronously but make sure it is executed on
// |signaling_thread|.
webrtc::MethodCall<webrtc::TgPeerConnectionFactory, bool> call(pc_factory.get(), &webrtc::TgPeerConnectionFactory::Initialize);
result = call.Marshal(RTC_FROM_HERE, pc_factory->signaling_thread());
if (!result) {
return nil;
}
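// Wrap the factory in a proxy so subsequent factory calls are marshaled onto
// the signaling thread, mirroring what CreateModularPeerConnectionFactory
// does internally.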
_nativeFactory = webrtc::TgPeerConnectionFactoryProxy::Create(pc_factory->signaling_thread(), pc_factory);
webrtc::PeerConnectionInterface::RTCConfiguration config;
config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan;
@@ -263,6 +279,7 @@ public:
//config.type = webrtc::PeerConnectionInterface::kRelay;
_observer.reset(new PeerConnectionObserverImpl(_discoveredIceCandidate, _connectionStateChanged));
_peerConnection = _nativeFactory->CreatePeerConnection(config, nullptr, nullptr, _observer.get());
assert(_peerConnection != nullptr);
@@ -320,7 +337,7 @@ public:
AVCaptureDeviceFormat *bestFormat = nil;
for (AVCaptureDeviceFormat *format in sortedFormats) {
CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
if (dimensions.width >= 600 || dimensions.height >= 600) {
if (dimensions.width >= 1000 || dimensions.height >= 1000) {
bestFormat = format;
break;
}

View File

@@ -0,0 +1,811 @@
/*
* Copyright 2011 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_dtls_transport.h"
#include <algorithm>
#include <memory>
#include <utility>
#include "api/rtc_event_log/rtc_event_log.h"
#include "logging/rtc_event_log/events/rtc_event_dtls_transport_state.h"
#include "logging/rtc_event_log/events/rtc_event_dtls_writable_state.h"
#include "p2p/base/packet_transport_internal.h"
#include "rtc_base/buffer.h"
#include "rtc_base/checks.h"
#include "rtc_base/dscp.h"
#include "rtc_base/logging.h"
#include "rtc_base/message_queue.h"
#include "rtc_base/rtc_certificate.h"
#include "rtc_base/ssl_stream_adapter.h"
#include "rtc_base/stream.h"
#include "rtc_base/thread.h"
namespace cricket {
// We don't pull the RTP constants from rtputils.h, to avoid a layer violation.
static const size_t kDtlsRecordHeaderLen = 13;
static const size_t kMaxDtlsPacketLen = 2048;
static const size_t kMinRtpPacketLen = 12;
// Maximum number of pending packets in the queue. Packets are read immediately
// after they have been written, so a capacity of "1" is sufficient.
static const size_t kMaxPendingPackets = 1;
// Minimum and maximum values for the initial DTLS handshake timeout. We'll pick
// an initial timeout based on ICE RTT estimates, but clamp it to this range.
static const int kMinHandshakeTimeout = 50;
static const int kMaxHandshakeTimeout = 3000;
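// Demultiplexing heuristics in the style of RFC 5764/7983: the first byte of
// a DTLS record (its content type) falls in 20..63, an RTP/RTCP packet has
// version bits 0b10 so its first byte falls in 128..191, and a DTLS
// ClientHello is a handshake record (type 22) whose first handshake byte,
// immediately after the 13-byte record header, is 1.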
static bool IsDtlsPacket(const char* data, size_t len) {
const uint8_t* u = reinterpret_cast<const uint8_t*>(data);
return (len >= kDtlsRecordHeaderLen && (u[0] > 19 && u[0] < 64));
}
static bool IsDtlsClientHelloPacket(const char* data, size_t len) {
if (!IsDtlsPacket(data, len)) {
return false;
}
const uint8_t* u = reinterpret_cast<const uint8_t*>(data);
return len > 17 && u[0] == 22 && u[13] == 1;
}
static bool IsRtpPacket(const char* data, size_t len) {
const uint8_t* u = reinterpret_cast<const uint8_t*>(data);
return (len >= kMinRtpPacketLen && (u[0] & 0xC0) == 0x80);
}
/*StreamInterfaceChannel::StreamInterfaceChannel(
IceTransportInternal* ice_transport)
: ice_transport_(ice_transport),
state_(rtc::SS_OPEN),
packets_(kMaxPendingPackets, kMaxDtlsPacketLen) {}
rtc::StreamResult StreamInterfaceChannel::Read(void* buffer,
size_t buffer_len,
size_t* read,
int* error) {
if (state_ == rtc::SS_CLOSED)
return rtc::SR_EOS;
if (state_ == rtc::SS_OPENING)
return rtc::SR_BLOCK;
if (!packets_.ReadFront(buffer, buffer_len, read)) {
return rtc::SR_BLOCK;
}
return rtc::SR_SUCCESS;
}
rtc::StreamResult StreamInterfaceChannel::Write(const void* data,
size_t data_len,
size_t* written,
int* error) {
// Always succeeds, since this is an unreliable transport anyway.
// TODO(zhihuang): Should this block if ice_transport_'s temporarily
// unwritable?
rtc::PacketOptions packet_options;
ice_transport_->SendPacket(static_cast<const char*>(data), data_len,
packet_options);
if (written) {
*written = data_len;
}
return rtc::SR_SUCCESS;
}
bool StreamInterfaceChannel::OnPacketReceived(const char* data, size_t size) {
// We force a read event here to ensure that we don't overflow our queue.
bool ret = packets_.WriteBack(data, size, NULL);
RTC_CHECK(ret) << "Failed to write packet to queue.";
if (ret) {
SignalEvent(this, rtc::SE_READ, 0);
}
return ret;
}
rtc::StreamState StreamInterfaceChannel::GetState() const {
return state_;
}
void StreamInterfaceChannel::Close() {
packets_.Clear();
state_ = rtc::SS_CLOSED;
}*/
TgDtlsTransport::TgDtlsTransport(IceTransportInternal* ice_transport,
const webrtc::CryptoOptions& crypto_options,
webrtc::RtcEventLog* event_log)
: transport_name_(ice_transport->transport_name()),
component_(ice_transport->component()),
ice_transport_(ice_transport),
downward_(NULL),
srtp_ciphers_(crypto_options.GetSupportedDtlsSrtpCryptoSuites()),
ssl_max_version_(rtc::SSL_PROTOCOL_DTLS_12),
crypto_options_(crypto_options),
event_log_(event_log) {
RTC_DCHECK(ice_transport_);
ConnectToIceTransport();
}
TgDtlsTransport::~TgDtlsTransport() = default;
const webrtc::CryptoOptions& TgDtlsTransport::crypto_options() const {
return crypto_options_;
}
DtlsTransportState TgDtlsTransport::dtls_state() const {
return dtls_state_;
}
const std::string& TgDtlsTransport::transport_name() const {
return transport_name_;
}
int TgDtlsTransport::component() const {
return component_;
}
bool TgDtlsTransport::IsDtlsActive() const {
return dtls_active_;
}
bool TgDtlsTransport::SetLocalCertificate(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) {
if (dtls_active_) {
if (certificate == local_certificate_) {
// This may happen during renegotiation.
RTC_LOG(LS_INFO) << ToString() << ": Ignoring identical DTLS identity";
return true;
} else {
RTC_LOG(LS_ERROR) << ToString()
<< ": Can't change DTLS local identity in this state";
return false;
}
}
if (certificate) {
local_certificate_ = certificate;
dtls_active_ = true;
} else {
RTC_LOG(LS_INFO) << ToString()
<< ": NULL DTLS identity supplied. Not doing DTLS";
}
return true;
}
rtc::scoped_refptr<rtc::RTCCertificate> TgDtlsTransport::GetLocalCertificate()
const {
return local_certificate_;
}
bool TgDtlsTransport::SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) {
if (dtls_active_) {
RTC_LOG(LS_ERROR) << "Not changing max. protocol version "
"while DTLS is negotiating";
return false;
}
ssl_max_version_ = version;
return true;
}
bool TgDtlsTransport::SetDtlsRole(rtc::SSLRole role) {
if (dtls_) {
RTC_DCHECK(dtls_role_);
if (*dtls_role_ != role) {
RTC_LOG(LS_ERROR)
<< "SSL Role can't be reversed after the session is setup.";
return false;
}
return true;
}
dtls_role_ = role;
return true;
}
bool TgDtlsTransport::GetDtlsRole(rtc::SSLRole* role) const {
if (!dtls_role_) {
return false;
}
*role = *dtls_role_;
return true;
}
bool TgDtlsTransport::GetSslCipherSuite(int* cipher) {
if (dtls_state() != DTLS_TRANSPORT_CONNECTED) {
return false;
}
return dtls_->GetSslCipherSuite(cipher);
}
bool TgDtlsTransport::SetRemoteFingerprint(const std::string& digest_alg,
const uint8_t* digest,
size_t digest_len) {
rtc::Buffer remote_fingerprint_value(digest, digest_len);
// Once we have the local certificate, the same remote fingerprint can be set
// multiple times.
if (dtls_active_ && remote_fingerprint_value_ == remote_fingerprint_value &&
!digest_alg.empty()) {
// This may happen during renegotiation.
RTC_LOG(LS_INFO) << ToString()
<< ": Ignoring identical remote DTLS fingerprint";
return true;
}
// If the other side doesn't support DTLS, turn off |dtls_active_|.
// TODO(deadbeef): Remove this. It's dangerous, because it relies on higher
// level code to ensure DTLS is actually used, but there are tests that
// depend on it, for the case where an m= section is rejected. In that case
// SetRemoteFingerprint shouldn't even be called though.
if (digest_alg.empty()) {
RTC_DCHECK(!digest_len);
RTC_LOG(LS_INFO) << ToString() << ": Other side didn't support DTLS.";
dtls_active_ = false;
return true;
}
// Otherwise, we must have a local certificate before setting remote
// fingerprint.
if (!dtls_active_) {
RTC_LOG(LS_ERROR) << ToString()
<< ": Can't set DTLS remote settings in this state.";
return false;
}
// At this point we know we are doing DTLS
bool fingerprint_changing = remote_fingerprint_value_.size() > 0u;
remote_fingerprint_value_ = std::move(remote_fingerprint_value);
remote_fingerprint_algorithm_ = digest_alg;
if (dtls_ && !fingerprint_changing) {
// This can occur if DTLS is set up before a remote fingerprint is
// received. For instance, if we set up DTLS due to receiving an early
// ClientHello.
rtc::SSLPeerCertificateDigestError err;
if (!dtls_->SetPeerCertificateDigest(
remote_fingerprint_algorithm_,
reinterpret_cast<unsigned char*>(remote_fingerprint_value_.data()),
remote_fingerprint_value_.size(), &err)) {
RTC_LOG(LS_ERROR) << ToString()
<< ": Couldn't set DTLS certificate digest.";
set_dtls_state(DTLS_TRANSPORT_FAILED);
// If the error is "verification failed", don't return false, because
// this means the fingerprint was formatted correctly but didn't match
// the certificate from the DTLS handshake. Thus the DTLS state should go
// to "failed", but SetRemoteDescription shouldn't fail.
return err == rtc::SSLPeerCertificateDigestError::VERIFICATION_FAILED;
}
return true;
}
// If the fingerprint is changing, we'll tear down the DTLS association and
// create a new one, resetting our state.
if (dtls_ && fingerprint_changing) {
dtls_.reset(nullptr);
set_dtls_state(DTLS_TRANSPORT_NEW);
set_writable(false);
}
if (!SetupDtls()) {
set_dtls_state(DTLS_TRANSPORT_FAILED);
return false;
}
return true;
}
std::unique_ptr<rtc::SSLCertChain> TgDtlsTransport::GetRemoteSSLCertChain()
const {
if (!dtls_) {
return nullptr;
}
return dtls_->GetPeerSSLCertChain();
}
bool TgDtlsTransport::ExportKeyingMaterial(const std::string& label,
const uint8_t* context,
size_t context_len,
bool use_context,
uint8_t* result,
size_t result_len) {
return (dtls_.get())
? dtls_->ExportKeyingMaterial(label, context, context_len,
use_context, result, result_len)
: false;
}
bool TgDtlsTransport::SetupDtls() {
RTC_DCHECK(dtls_role_);
StreamInterfaceChannel* downward = new StreamInterfaceChannel(ice_transport_);
dtls_.reset(rtc::SSLStreamAdapter::Create(downward));
if (!dtls_) {
RTC_LOG(LS_ERROR) << ToString() << ": Failed to create DTLS adapter.";
delete downward;
return false;
}
downward_ = downward;
dtls_->SetIdentity(local_certificate_->identity()->GetReference());
dtls_->SetMode(rtc::SSL_MODE_DTLS);
dtls_->SetMaxProtocolVersion(ssl_max_version_);
dtls_->SetServerRole(*dtls_role_);
dtls_->SignalEvent.connect(this, &TgDtlsTransport::OnDtlsEvent);
dtls_->SignalSSLHandshakeError.connect(this,
&TgDtlsTransport::OnDtlsHandshakeError);
if (remote_fingerprint_value_.size() &&
!dtls_->SetPeerCertificateDigest(
remote_fingerprint_algorithm_,
reinterpret_cast<unsigned char*>(remote_fingerprint_value_.data()),
remote_fingerprint_value_.size())) {
RTC_LOG(LS_ERROR) << ToString()
<< ": Couldn't set DTLS certificate digest.";
return false;
}
// Set up DTLS-SRTP, if it's been enabled.
if (!srtp_ciphers_.empty()) {
if (!dtls_->SetDtlsSrtpCryptoSuites(srtp_ciphers_)) {
RTC_LOG(LS_ERROR) << ToString() << ": Couldn't set DTLS-SRTP ciphers.";
return false;
}
} else {
RTC_LOG(LS_INFO) << ToString() << ": Not using DTLS-SRTP.";
}
RTC_LOG(LS_INFO) << ToString() << ": DTLS setup complete.";
// If the underlying ice_transport is already writable at this point, we may
// be able to start DTLS right away.
MaybeStartDtls();
return true;
}
bool TgDtlsTransport::GetSrtpCryptoSuite(int* cipher) {
if (dtls_state() != DTLS_TRANSPORT_CONNECTED) {
return false;
}
return dtls_->GetDtlsSrtpCryptoSuite(cipher);
}
bool TgDtlsTransport::GetSslVersionBytes(int* version) const {
if (dtls_state() != DTLS_TRANSPORT_CONNECTED) {
return false;
}
return dtls_->GetSslVersionBytes(version);
}
// Called from upper layers to send a media packet.
int TgDtlsTransport::SendPacket(const char* data,
size_t size,
const rtc::PacketOptions& options,
int flags) {
if (!dtls_active_) {
// Not doing DTLS.
return ice_transport_->SendPacket(data, size, options);
}
switch (dtls_state()) {
case DTLS_TRANSPORT_NEW:
// Can't send data until the connection is active.
// TODO(ekr@rtfm.com): assert here if dtls_ is NULL?
return -1;
case DTLS_TRANSPORT_CONNECTING:
// Can't send data until the connection is active.
return -1;
case DTLS_TRANSPORT_CONNECTED:
if (flags & PF_SRTP_BYPASS) {
RTC_DCHECK(!srtp_ciphers_.empty());
if (!IsRtpPacket(data, size)) {
return -1;
}
return ice_transport_->SendPacket(data, size, options);
} else {
return (dtls_->WriteAll(data, size, NULL, NULL) == rtc::SR_SUCCESS)
? static_cast<int>(size)
: -1;
}
case DTLS_TRANSPORT_FAILED:
case DTLS_TRANSPORT_CLOSED:
// Can't send anything when we're closed.
return -1;
default:
RTC_NOTREACHED();
return -1;
}
}
IceTransportInternal* TgDtlsTransport::ice_transport() {
return ice_transport_;
}
bool TgDtlsTransport::IsDtlsConnected() {
return dtls_ && dtls_->IsTlsConnected();
}
bool TgDtlsTransport::receiving() const {
return receiving_;
}
bool TgDtlsTransport::writable() const {
return writable_;
}
int TgDtlsTransport::GetError() {
return ice_transport_->GetError();
}
absl::optional<rtc::NetworkRoute> TgDtlsTransport::network_route() const {
return ice_transport_->network_route();
}
bool TgDtlsTransport::GetOption(rtc::Socket::Option opt, int* value) {
return ice_transport_->GetOption(opt, value);
}
int TgDtlsTransport::SetOption(rtc::Socket::Option opt, int value) {
return ice_transport_->SetOption(opt, value);
}
void TgDtlsTransport::ConnectToIceTransport() {
RTC_DCHECK(ice_transport_);
ice_transport_->SignalWritableState.connect(this,
&TgDtlsTransport::OnWritableState);
ice_transport_->SignalReadPacket.connect(this, &TgDtlsTransport::OnReadPacket);
ice_transport_->SignalSentPacket.connect(this, &TgDtlsTransport::OnSentPacket);
ice_transport_->SignalReadyToSend.connect(this,
&TgDtlsTransport::OnReadyToSend);
ice_transport_->SignalReceivingState.connect(
this, &TgDtlsTransport::OnReceivingState);
ice_transport_->SignalNetworkRouteChanged.connect(
this, &TgDtlsTransport::OnNetworkRouteChanged);
}
// The state transition logic here is as follows:
// (1) If we're not doing DTLS-SRTP, then the state is just the
// state of the underlying impl()
// (2) If we're doing DTLS-SRTP:
// - Prior to the DTLS handshake, the state is neither receiving nor
// writable
// - When the impl goes writable for the first time we
// start the DTLS handshake
// - Once the DTLS handshake completes, the state is that of the
// impl again
void TgDtlsTransport::OnWritableState(rtc::PacketTransportInternal* transport) {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DCHECK(transport == ice_transport_);
RTC_LOG(LS_VERBOSE) << ToString()
<< ": ice_transport writable state changed to "
<< ice_transport_->writable();
if (!dtls_active_) {
// Not doing DTLS.
// Note: SignalWritableState fired by set_writable.
set_writable(ice_transport_->writable());
return;
}
switch (dtls_state()) {
case DTLS_TRANSPORT_NEW:
MaybeStartDtls();
break;
case DTLS_TRANSPORT_CONNECTED:
// Note: SignalWritableState fired by set_writable.
set_writable(ice_transport_->writable());
break;
case DTLS_TRANSPORT_CONNECTING:
// Do nothing.
break;
case DTLS_TRANSPORT_FAILED:
case DTLS_TRANSPORT_CLOSED:
// Should not happen. Do nothing.
break;
}
}
void TgDtlsTransport::OnReceivingState(rtc::PacketTransportInternal* transport) {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DCHECK(transport == ice_transport_);
RTC_LOG(LS_VERBOSE) << ToString()
<< ": ice_transport "
"receiving state changed to "
<< ice_transport_->receiving();
if (!dtls_active_ || dtls_state() == DTLS_TRANSPORT_CONNECTED) {
// Note: SignalReceivingState fired by set_receiving.
set_receiving(ice_transport_->receiving());
}
}
void TgDtlsTransport::OnReadPacket(rtc::PacketTransportInternal* transport,
const char* data,
size_t size,
const int64_t& packet_time_us,
int flags) {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DCHECK(transport == ice_transport_);
RTC_DCHECK(flags == 0);
if (!dtls_active_) {
// Not doing DTLS.
SignalReadPacket(this, data, size, packet_time_us, 0);
return;
}
switch (dtls_state()) {
case DTLS_TRANSPORT_NEW:
if (dtls_) {
RTC_LOG(LS_INFO) << ToString()
<< ": Packet received before DTLS started.";
} else {
RTC_LOG(LS_WARNING) << ToString()
<< ": Packet received before we know if we are "
"doing DTLS or not.";
}
// Cache a client hello packet received before DTLS has actually started.
if (IsDtlsClientHelloPacket(data, size)) {
RTC_LOG(LS_INFO) << ToString()
<< ": Caching DTLS ClientHello packet until DTLS is "
"started.";
cached_client_hello_.SetData(data, size);
// If we haven't started setting up DTLS yet (because we don't have a
// remote fingerprint/role), we can use the client hello as a clue that
// the peer has chosen the client role, and proceed with the handshake.
// The fingerprint will be verified when it's set.
if (!dtls_ && local_certificate_) {
SetDtlsRole(rtc::SSL_SERVER);
SetupDtls();
}
} else {
RTC_LOG(LS_INFO) << ToString()
<< ": Not a DTLS ClientHello packet; dropping.";
}
break;
case DTLS_TRANSPORT_CONNECTING:
case DTLS_TRANSPORT_CONNECTED:
// We should only get DTLS or SRTP packets; STUN's already been demuxed.
// Is this potentially a DTLS packet?
if (IsDtlsPacket(data, size)) {
if (!HandleDtlsPacket(data, size)) {
RTC_LOG(LS_ERROR) << ToString() << ": Failed to handle DTLS packet.";
return;
}
} else {
// Not a DTLS packet; our handshake should be complete by now.
if (dtls_state() != DTLS_TRANSPORT_CONNECTED) {
RTC_LOG(LS_ERROR) << ToString()
<< ": Received non-DTLS packet before DTLS "
"complete.";
return;
}
// And it had better be a SRTP packet.
if (!IsRtpPacket(data, size)) {
RTC_LOG(LS_ERROR)
<< ToString() << ": Received unexpected non-DTLS packet.";
return;
}
// Sanity check.
RTC_DCHECK(!srtp_ciphers_.empty());
// Signal this upwards as a bypass packet.
SignalReadPacket(this, data, size, packet_time_us, PF_SRTP_BYPASS);
}
break;
case DTLS_TRANSPORT_FAILED:
case DTLS_TRANSPORT_CLOSED:
// This shouldn't be happening. Drop the packet.
break;
}
}
void TgDtlsTransport::OnSentPacket(rtc::PacketTransportInternal* transport,
const rtc::SentPacket& sent_packet) {
RTC_DCHECK_RUN_ON(&thread_checker_);
SignalSentPacket(this, sent_packet);
}
void TgDtlsTransport::OnReadyToSend(rtc::PacketTransportInternal* transport) {
RTC_DCHECK_RUN_ON(&thread_checker_);
if (writable()) {
SignalReadyToSend(this);
}
}
void TgDtlsTransport::OnDtlsEvent(rtc::StreamInterface* dtls, int sig, int err) {
RTC_DCHECK_RUN_ON(&thread_checker_);
RTC_DCHECK(dtls == dtls_.get());
if (sig & rtc::SE_OPEN) {
// This is the first time.
RTC_LOG(LS_INFO) << ToString() << ": DTLS handshake complete.";
if (dtls_->GetState() == rtc::SS_OPEN) {
// The check for OPEN shouldn't be necessary but let's make
// sure we don't accidentally frob the state if it's closed.
set_dtls_state(DTLS_TRANSPORT_CONNECTED);
set_writable(true);
}
}
if (sig & rtc::SE_READ) {
char buf[kMaxDtlsPacketLen];
size_t read;
int read_error;
rtc::StreamResult ret;
// The underlying DTLS stream may have received multiple DTLS records in
// one packet, so read all of them.
do {
ret = dtls_->Read(buf, sizeof(buf), &read, &read_error);
if (ret == rtc::SR_SUCCESS) {
SignalReadPacket(this, buf, read, rtc::TimeMicros(), 0);
} else if (ret == rtc::SR_EOS) {
// Remote peer shut down the association with no error.
RTC_LOG(LS_INFO) << ToString() << ": DTLS transport closed by remote";
set_writable(false);
set_dtls_state(DTLS_TRANSPORT_CLOSED);
} else if (ret == rtc::SR_ERROR) {
// Remote peer shut down the association with an error.
RTC_LOG(LS_INFO)
<< ToString()
<< ": Closed by remote with DTLS transport error, code="
<< read_error;
set_writable(false);
set_dtls_state(DTLS_TRANSPORT_FAILED);
}
} while (ret == rtc::SR_SUCCESS);
}
if (sig & rtc::SE_CLOSE) {
RTC_DCHECK(sig == rtc::SE_CLOSE); // SE_CLOSE should be by itself.
set_writable(false);
if (!err) {
RTC_LOG(LS_INFO) << ToString() << ": DTLS transport closed";
set_dtls_state(DTLS_TRANSPORT_CLOSED);
} else {
RTC_LOG(LS_INFO) << ToString() << ": DTLS transport error, code=" << err;
set_dtls_state(DTLS_TRANSPORT_FAILED);
}
}
}
void TgDtlsTransport::OnNetworkRouteChanged(
absl::optional<rtc::NetworkRoute> network_route) {
RTC_DCHECK_RUN_ON(&thread_checker_);
SignalNetworkRouteChanged(network_route);
}
void TgDtlsTransport::MaybeStartDtls() {
if (dtls_ && ice_transport_->writable()) {
ConfigureHandshakeTimeout();
if (dtls_->StartSSL()) {
// This should never fail:
// Because we are operating in a nonblocking mode and all
// incoming packets come in via OnReadPacket(), which rejects
// packets in this state, the incoming queue must be empty. We
// ignore write errors, thus any errors must be because of
// configuration and therefore are our fault.
RTC_NOTREACHED() << "StartSSL failed.";
RTC_LOG(LS_ERROR) << ToString() << ": Couldn't start DTLS handshake";
set_dtls_state(DTLS_TRANSPORT_FAILED);
return;
}
RTC_LOG(LS_INFO) << ToString() << ": DtlsTransport: Started DTLS handshake";
set_dtls_state(DTLS_TRANSPORT_CONNECTING);
// Now that the handshake has started, we can process a cached ClientHello
// (if one exists).
if (cached_client_hello_.size()) {
if (*dtls_role_ == rtc::SSL_SERVER) {
RTC_LOG(LS_INFO) << ToString()
<< ": Handling cached DTLS ClientHello packet.";
if (!HandleDtlsPacket(cached_client_hello_.data<char>(),
cached_client_hello_.size())) {
RTC_LOG(LS_ERROR) << ToString() << ": Failed to handle DTLS packet.";
}
} else {
RTC_LOG(LS_WARNING) << ToString()
<< ": Discarding cached DTLS ClientHello packet "
"because we don't have the server role.";
}
cached_client_hello_.Clear();
}
}
}
// Called from OnReadPacket when a DTLS packet is received.
bool TgDtlsTransport::HandleDtlsPacket(const char* data, size_t size) {
// Sanity check we're not passing junk that
// just looks like DTLS.
const uint8_t* tmp_data = reinterpret_cast<const uint8_t*>(data);
size_t tmp_size = size;
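// Walk the packet record by record: in the 13-byte DTLS record header, bytes
// 11-12 carry the big-endian payload length, so the packet is accepted only
// if it parses as a sequence of complete records.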
while (tmp_size > 0) {
if (tmp_size < kDtlsRecordHeaderLen)
return false; // Too short for the header
size_t record_len = (tmp_data[11] << 8) | (tmp_data[12]);
if ((record_len + kDtlsRecordHeaderLen) > tmp_size)
return false; // Body too short
tmp_data += record_len + kDtlsRecordHeaderLen;
tmp_size -= record_len + kDtlsRecordHeaderLen;
}
// Looks good. Pass to the SIC which ends up being passed to
// the DTLS stack.
return downward_->OnPacketReceived(data, size);
}
void TgDtlsTransport::set_receiving(bool receiving) {
if (receiving_ == receiving) {
return;
}
receiving_ = receiving;
SignalReceivingState(this);
}
void TgDtlsTransport::set_writable(bool writable) {
if (writable_ == writable) {
return;
}
if (event_log_) {
event_log_->Log(
std::make_unique<webrtc::RtcEventDtlsWritableState>(writable));
}
RTC_LOG(LS_VERBOSE) << ToString() << ": set_writable to: " << writable;
writable_ = writable;
if (writable_) {
SignalReadyToSend(this);
}
SignalWritableState(this);
}
void TgDtlsTransport::set_dtls_state(DtlsTransportState state) {
if (dtls_state_ == state) {
return;
}
if (event_log_) {
event_log_->Log(std::make_unique<webrtc::RtcEventDtlsTransportState>(
ConvertDtlsTransportState(state)));
}
RTC_LOG(LS_VERBOSE) << ToString() << ": set_dtls_state from:" << dtls_state_
<< " to " << state;
dtls_state_ = state;
SignalDtlsState(this, state);
}
void TgDtlsTransport::OnDtlsHandshakeError(rtc::SSLHandshakeError error) {
SignalDtlsHandshakeError(error);
}
void TgDtlsTransport::ConfigureHandshakeTimeout() {
RTC_DCHECK(dtls_);
absl::optional<int> rtt = ice_transport_->GetRttEstimate();
if (rtt) {
// Limit the timeout to a reasonable range in case the ICE RTT takes
// extreme values.
int initial_timeout = std::max(kMinHandshakeTimeout,
std::min(kMaxHandshakeTimeout, 2 * (*rtt)));
RTC_LOG(LS_INFO) << ToString() << ": configuring DTLS handshake timeout "
<< initial_timeout << " based on ICE RTT " << *rtt;
dtls_->SetInitialRetransmissionTimeout(initial_timeout);
} else {
RTC_LOG(LS_INFO)
<< ToString()
<< ": no RTT estimate - using default DTLS handshake timeout";
}
}
} // namespace cricket

View File

@@ -0,0 +1,229 @@
/*
* Copyright 2011 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_P2P_BASE_DTLS_TRANSPORT_H_
#define TG_P2P_BASE_DTLS_TRANSPORT_H_
#include <memory>
#include <string>
#include <vector>
#include "p2p/base/dtls_transport.h"
#include "api/crypto/crypto_options.h"
#include "p2p/base/dtls_transport_internal.h"
#include "p2p/base/ice_transport_internal.h"
#include "rtc_base/buffer.h"
#include "rtc_base/buffer_queue.h"
#include "rtc_base/constructor_magic.h"
#include "rtc_base/ssl_stream_adapter.h"
#include "rtc_base/stream.h"
#include "rtc_base/strings/string_builder.h"
#include "rtc_base/thread_checker.h"
namespace rtc {
class PacketTransportInternal;
}
namespace cricket {
// This class provides a DTLS SSLStreamAdapter inside a TransportChannel-style
// packet-based interface, wrapping an existing TransportChannel instance
// (e.g. a P2PTransportChannel)
// Here's the way this works:
//
// DtlsTransport {
// SSLStreamAdapter* dtls_ {
// StreamInterfaceChannel downward_ {
// IceTransportInternal* ice_transport_;
// }
// }
// }
//
// - Data which comes into DtlsTransport from the underlying
// ice_transport_ via OnReadPacket() is checked for whether it is DTLS
// or not, and if it is, is passed to DtlsTransport::HandleDtlsPacket,
// which pushes it into downward_. dtls_ is listening for events on
// downward_, so it immediately calls downward_->Read().
//
// - Data written to DtlsTransport is passed either to downward_ or directly
// to ice_transport_, depending on whether DTLS is negotiated and whether
// the flags include PF_SRTP_BYPASS
//
// - The SSLStreamAdapter writes to downward_->Write() which translates it
// into packet writes on ice_transport_.
//
// This class is not thread safe; all methods must be called on the same thread
// as the constructor.
class TgDtlsTransport : public DtlsTransportInternal {
public:
// |ice_transport| is the ICE transport this DTLS transport is wrapping. It
// must outlive this DTLS transport.
//
// |crypto_options| are the options used for the DTLS handshake. This affects
// whether GCM crypto suites are negotiated.
//
// |event_log| is an optional RtcEventLog for logging state changes. It should
// outlive the DtlsTransport.
explicit TgDtlsTransport(IceTransportInternal* ice_transport,
const webrtc::CryptoOptions& crypto_options,
webrtc::RtcEventLog* event_log);
~TgDtlsTransport() override;
const webrtc::CryptoOptions& crypto_options() const override;
DtlsTransportState dtls_state() const override;
const std::string& transport_name() const override;
int component() const override;
// DTLS is active if a local certificate was set. Otherwise this acts in a
// "passthrough" mode, sending packets directly through the underlying ICE
// transport.
// TODO(deadbeef): Remove this weirdness, and handle it in the upper layers.
bool IsDtlsActive() const override;
// SetLocalCertificate is what makes DTLS active. It must be called before
// SetRemoteFingerprint.
// TODO(deadbeef): Once DtlsTransport no longer has the concept of being
// "active" or not (acting as a passthrough if not active), just require this
// certificate on construction or "Start".
bool SetLocalCertificate(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) override;
rtc::scoped_refptr<rtc::RTCCertificate> GetLocalCertificate() const override;
// SetRemoteFingerprint must be called after SetLocalCertificate, and any
// other methods like SetDtlsRole. It's what triggers the actual DTLS setup.
// TODO(deadbeef): Rename to "Start" like in ORTC?
bool SetRemoteFingerprint(const std::string& digest_alg,
const uint8_t* digest,
size_t digest_len) override;
// Called to send a packet (via DTLS, if turned on).
int SendPacket(const char* data,
size_t size,
const rtc::PacketOptions& options,
int flags) override;
bool GetOption(rtc::Socket::Option opt, int* value) override;
bool SetSslMaxProtocolVersion(rtc::SSLProtocolVersion version) override;
// Find out which TLS version was negotiated
bool GetSslVersionBytes(int* version) const override;
// Find out which DTLS-SRTP cipher was negotiated
bool GetSrtpCryptoSuite(int* cipher) override;
bool GetDtlsRole(rtc::SSLRole* role) const override;
bool SetDtlsRole(rtc::SSLRole role) override;
// Find out which DTLS cipher was negotiated
bool GetSslCipherSuite(int* cipher) override;
// Once DTLS has been established, this method retrieves the certificate
// chain in use by the remote peer, for use in external identity
// verification.
std::unique_ptr<rtc::SSLCertChain> GetRemoteSSLCertChain() const override;
// Once DTLS has been established (i.e., this ice_transport is writable), this
// method extracts the keys negotiated during the DTLS handshake, for use in
// external encryption. DTLS-SRTP uses this to extract the needed SRTP keys.
// See the SSLStreamAdapter documentation for info on the specific parameters.
bool ExportKeyingMaterial(const std::string& label,
const uint8_t* context,
size_t context_len,
bool use_context,
uint8_t* result,
size_t result_len) override;
IceTransportInternal* ice_transport() override;
// For informational purposes. Tells if the DTLS handshake has finished.
// This may be true even if writable() is false, if the remote fingerprint
// has not yet been verified.
bool IsDtlsConnected();
bool receiving() const override;
bool writable() const override;
int GetError() override;
absl::optional<rtc::NetworkRoute> network_route() const override;
int SetOption(rtc::Socket::Option opt, int value) override;
std::string ToString() const {
const absl::string_view RECEIVING_ABBREV[2] = {"_", "R"};
const absl::string_view WRITABLE_ABBREV[2] = {"_", "W"};
rtc::StringBuilder sb;
sb << "DtlsTransport[" << transport_name_ << "|" << component_ << "|"
<< RECEIVING_ABBREV[receiving()] << WRITABLE_ABBREV[writable()] << "]";
return sb.Release();
}
private:
void ConnectToIceTransport();
void OnWritableState(rtc::PacketTransportInternal* transport);
void OnReadPacket(rtc::PacketTransportInternal* transport,
const char* data,
size_t size,
const int64_t& packet_time_us,
int flags);
void OnSentPacket(rtc::PacketTransportInternal* transport,
const rtc::SentPacket& sent_packet);
void OnReadyToSend(rtc::PacketTransportInternal* transport);
void OnReceivingState(rtc::PacketTransportInternal* transport);
void OnDtlsEvent(rtc::StreamInterface* stream_, int sig, int err);
void OnNetworkRouteChanged(absl::optional<rtc::NetworkRoute> network_route);
bool SetupDtls();
void MaybeStartDtls();
bool HandleDtlsPacket(const char* data, size_t size);
void OnDtlsHandshakeError(rtc::SSLHandshakeError error);
void ConfigureHandshakeTimeout();
void set_receiving(bool receiving);
void set_writable(bool writable);
// Sets the DTLS state, signaling if necessary.
void set_dtls_state(DtlsTransportState state);
rtc::ThreadChecker thread_checker_;
std::string transport_name_;
int component_;
DtlsTransportState dtls_state_ = DTLS_TRANSPORT_NEW;
// Underlying ice_transport, not owned by this class.
IceTransportInternal* ice_transport_;
std::unique_ptr<rtc::SSLStreamAdapter> dtls_; // The DTLS stream
StreamInterfaceChannel*
downward_; // Wrapper for ice_transport_, owned by dtls_.
std::vector<int> srtp_ciphers_; // SRTP ciphers to use with DTLS.
bool dtls_active_ = false;
rtc::scoped_refptr<rtc::RTCCertificate> local_certificate_;
absl::optional<rtc::SSLRole> dtls_role_;
rtc::SSLProtocolVersion ssl_max_version_;
webrtc::CryptoOptions crypto_options_;
rtc::Buffer remote_fingerprint_value_;
std::string remote_fingerprint_algorithm_;
// Cached DTLS ClientHello packet that was received before we started the
// DTLS handshake. This could happen if the hello was received before the
// ice transport became writable, or before a remote fingerprint was received.
rtc::Buffer cached_client_hello_;
bool receiving_ = false;
bool writable_ = false;
webrtc::RtcEventLog* const event_log_;
RTC_DISALLOW_COPY_AND_ASSIGN(TgDtlsTransport);
};
} // namespace cricket
#endif // TG_P2P_BASE_DTLS_TRANSPORT_H_

View File

@@ -0,0 +1,868 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_jsep_transport.h"
#include <stddef.h>
#include <stdint.h>
#include <memory>
#include <type_traits>
#include <utility> // for std::pair
#include "api/array_view.h"
#include "api/candidate.h"
#include "p2p/base/p2p_constants.h"
#include "p2p/base/p2p_transport_channel.h"
#include "pc/sctp_data_channel_transport.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/logging.h"
#include "rtc_base/strings/string_builder.h"
using webrtc::SdpType;
namespace cricket {
static bool VerifyIceParams(const TgJsepTransportDescription& jsep_description) {
// For legacy protocols.
// TODO(zhihuang): Remove this once the legacy protocol is no longer
// supported.
if (jsep_description.transport_desc.ice_ufrag.empty() &&
jsep_description.transport_desc.ice_pwd.empty()) {
return true;
}
if (jsep_description.transport_desc.ice_ufrag.length() <
ICE_UFRAG_MIN_LENGTH ||
jsep_description.transport_desc.ice_ufrag.length() >
ICE_UFRAG_MAX_LENGTH) {
return false;
}
if (jsep_description.transport_desc.ice_pwd.length() < ICE_PWD_MIN_LENGTH ||
jsep_description.transport_desc.ice_pwd.length() > ICE_PWD_MAX_LENGTH) {
return false;
}
return true;
}
TgJsepTransportDescription::TgJsepTransportDescription() {}
TgJsepTransportDescription::TgJsepTransportDescription(
bool rtcp_mux_enabled,
const std::vector<CryptoParams>& cryptos,
const std::vector<int>& encrypted_header_extension_ids,
int rtp_abs_sendtime_extn_id,
const TransportDescription& transport_desc,
absl::optional<std::string> media_alt_protocol,
absl::optional<std::string> data_alt_protocol)
: rtcp_mux_enabled(rtcp_mux_enabled),
cryptos(cryptos),
encrypted_header_extension_ids(encrypted_header_extension_ids),
rtp_abs_sendtime_extn_id(rtp_abs_sendtime_extn_id),
transport_desc(transport_desc),
media_alt_protocol(media_alt_protocol),
data_alt_protocol(data_alt_protocol) {}
TgJsepTransportDescription::TgJsepTransportDescription(
const TgJsepTransportDescription& from)
: rtcp_mux_enabled(from.rtcp_mux_enabled),
cryptos(from.cryptos),
encrypted_header_extension_ids(from.encrypted_header_extension_ids),
rtp_abs_sendtime_extn_id(from.rtp_abs_sendtime_extn_id),
transport_desc(from.transport_desc),
media_alt_protocol(from.media_alt_protocol),
data_alt_protocol(from.data_alt_protocol) {}
TgJsepTransportDescription::~TgJsepTransportDescription() = default;
TgJsepTransportDescription& TgJsepTransportDescription::operator=(
const TgJsepTransportDescription& from) {
if (this == &from) {
return *this;
}
rtcp_mux_enabled = from.rtcp_mux_enabled;
cryptos = from.cryptos;
encrypted_header_extension_ids = from.encrypted_header_extension_ids;
rtp_abs_sendtime_extn_id = from.rtp_abs_sendtime_extn_id;
transport_desc = from.transport_desc;
media_alt_protocol = from.media_alt_protocol;
data_alt_protocol = from.data_alt_protocol;
return *this;
}
TgJsepTransport::TgJsepTransport(
const std::string& mid,
const rtc::scoped_refptr<rtc::RTCCertificate>& local_certificate,
rtc::scoped_refptr<webrtc::IceTransportInterface> ice_transport,
rtc::scoped_refptr<webrtc::IceTransportInterface> rtcp_ice_transport,
std::unique_ptr<webrtc::TgRtpTransport> unencrypted_rtp_transport,
std::unique_ptr<webrtc::SrtpTransport> sdes_transport,
std::unique_ptr<webrtc::DtlsSrtpTransport> dtls_srtp_transport,
std::unique_ptr<webrtc::RtpTransportInternal> datagram_rtp_transport,
std::unique_ptr<DtlsTransportInternal> rtp_dtls_transport,
std::unique_ptr<DtlsTransportInternal> rtcp_dtls_transport,
std::unique_ptr<SctpTransportInternal> sctp_transport,
std::unique_ptr<webrtc::DatagramTransportInterface> datagram_transport,
webrtc::DataChannelTransportInterface* data_channel_transport)
: network_thread_(rtc::Thread::Current()),
mid_(mid),
local_certificate_(local_certificate),
ice_transport_(std::move(ice_transport)),
rtcp_ice_transport_(std::move(rtcp_ice_transport)),
unencrypted_rtp_transport_(std::move(unencrypted_rtp_transport)),
sdes_transport_(std::move(sdes_transport)),
dtls_srtp_transport_(std::move(dtls_srtp_transport)),
rtp_dtls_transport_(
rtp_dtls_transport ? new rtc::RefCountedObject<webrtc::DtlsTransport>(
std::move(rtp_dtls_transport))
: nullptr),
rtcp_dtls_transport_(
rtcp_dtls_transport
? new rtc::RefCountedObject<webrtc::DtlsTransport>(
std::move(rtcp_dtls_transport))
: nullptr),
sctp_data_channel_transport_(
sctp_transport ? std::make_unique<webrtc::SctpDataChannelTransport>(
sctp_transport.get())
: nullptr),
sctp_transport_(sctp_transport
? new rtc::RefCountedObject<webrtc::SctpTransport>(
std::move(sctp_transport))
: nullptr),
datagram_transport_(std::move(datagram_transport)),
datagram_rtp_transport_(std::move(datagram_rtp_transport)),
data_channel_transport_(data_channel_transport) {
RTC_DCHECK(ice_transport_);
RTC_DCHECK(rtp_dtls_transport_);
// |rtcp_ice_transport_| must be present iff |rtcp_dtls_transport_| is
// present.
RTC_DCHECK_EQ((rtcp_ice_transport_ != nullptr),
(rtcp_dtls_transport_ != nullptr));
// Verify the "only one out of these three can be set" invariant.
if (unencrypted_rtp_transport_) {
RTC_DCHECK(!sdes_transport);
RTC_DCHECK(!dtls_srtp_transport);
} else if (sdes_transport_) {
RTC_DCHECK(!unencrypted_rtp_transport);
RTC_DCHECK(!dtls_srtp_transport);
} else {
RTC_DCHECK(dtls_srtp_transport_);
RTC_DCHECK(!unencrypted_rtp_transport);
RTC_DCHECK(!sdes_transport);
}
if (sctp_transport_) {
sctp_transport_->SetDtlsTransport(rtp_dtls_transport_);
}
if (datagram_rtp_transport_ && default_rtp_transport()) {
composite_rtp_transport_ = std::make_unique<webrtc::CompositeRtpTransport>(
std::vector<webrtc::RtpTransportInternal*>{
datagram_rtp_transport_.get(), default_rtp_transport()});
}
if (data_channel_transport_ && sctp_data_channel_transport_) {
composite_data_channel_transport_ =
std::make_unique<webrtc::CompositeDataChannelTransport>(
std::vector<webrtc::DataChannelTransportInterface*>{
data_channel_transport_, sctp_data_channel_transport_.get()});
}
}
TgJsepTransport::~TgJsepTransport() {
if (sctp_transport_) {
sctp_transport_->Clear();
}
// Clear all DtlsTransports. There may be pointers to these from
// other places, so we can't assume they'll be deleted by the destructor.
rtp_dtls_transport_->Clear();
if (rtcp_dtls_transport_) {
rtcp_dtls_transport_->Clear();
}
// ICE will be the last transport to be deleted.
}
webrtc::RTCError TgJsepTransport::SetLocalJsepTransportDescription(
const TgJsepTransportDescription& jsep_description,
SdpType type) {
webrtc::RTCError error;
RTC_DCHECK_RUN_ON(network_thread_);
if (!VerifyIceParams(jsep_description)) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Invalid ice-ufrag or ice-pwd length.");
}
if (!SetRtcpMux(jsep_description.rtcp_mux_enabled, type,
ContentSource::CS_LOCAL)) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Failed to setup RTCP mux.");
}
// If doing SDES, setup the SDES crypto parameters.
{
rtc::CritScope scope(&accessor_lock_);
if (sdes_transport_) {
RTC_DCHECK(!unencrypted_rtp_transport_);
RTC_DCHECK(!dtls_srtp_transport_);
if (!SetSdes(jsep_description.cryptos,
jsep_description.encrypted_header_extension_ids, type,
ContentSource::CS_LOCAL)) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Failed to setup SDES crypto parameters.");
}
} else if (dtls_srtp_transport_) {
RTC_DCHECK(!unencrypted_rtp_transport_);
RTC_DCHECK(!sdes_transport_);
dtls_srtp_transport_->UpdateRecvEncryptedHeaderExtensionIds(
jsep_description.encrypted_header_extension_ids);
}
}
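// A change in ice-ufrag/ice-pwd relative to the previous local description
// signals an ICE restart; remember it so the needs-ice-restart flag can be
// cleared below once the restart is actually applied.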
bool ice_restarting =
local_description_ != nullptr &&
IceCredentialsChanged(local_description_->transport_desc.ice_ufrag,
local_description_->transport_desc.ice_pwd,
jsep_description.transport_desc.ice_ufrag,
jsep_description.transport_desc.ice_pwd);
local_description_.reset(new TgJsepTransportDescription(jsep_description));
rtc::SSLFingerprint* local_fp =
local_description_->transport_desc.identity_fingerprint.get();
if (!local_fp) {
local_certificate_ = nullptr;
} else {
error = VerifyCertificateFingerprint(local_certificate_, local_fp);
if (!error.ok()) {
local_description_.reset();
return error;
}
}
{
rtc::CritScope scope(&accessor_lock_);
RTC_DCHECK(rtp_dtls_transport_->internal());
SetLocalIceParameters(rtp_dtls_transport_->internal()->ice_transport());
if (rtcp_dtls_transport_) {
RTC_DCHECK(rtcp_dtls_transport_->internal());
SetLocalIceParameters(rtcp_dtls_transport_->internal()->ice_transport());
}
}
// If PRANSWER/ANSWER is set, we should decide transport protocol type.
if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) {
error = NegotiateAndSetDtlsParameters(type);
NegotiateDatagramTransport(type);
}
if (!error.ok()) {
local_description_.reset();
return error;
}
{
rtc::CritScope scope(&accessor_lock_);
if (needs_ice_restart_ && ice_restarting) {
needs_ice_restart_ = false;
RTC_LOG(LS_VERBOSE) << "needs-ice-restart flag cleared for transport "
<< mid();
}
}
return webrtc::RTCError::OK();
}
webrtc::RTCError TgJsepTransport::SetRemoteJsepTransportDescription(
const TgJsepTransportDescription& jsep_description,
webrtc::SdpType type) {
webrtc::RTCError error;
RTC_DCHECK_RUN_ON(network_thread_);
if (!VerifyIceParams(jsep_description)) {
remote_description_.reset();
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Invalid ice-ufrag or ice-pwd length.");
}
if (!SetRtcpMux(jsep_description.rtcp_mux_enabled, type,
ContentSource::CS_REMOTE)) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Failed to setup RTCP mux.");
}
// If doing SDES, setup the SDES crypto parameters.
{
rtc::CritScope lock(&accessor_lock_);
if (sdes_transport_) {
RTC_DCHECK(!unencrypted_rtp_transport_);
RTC_DCHECK(!dtls_srtp_transport_);
if (!SetSdes(jsep_description.cryptos,
jsep_description.encrypted_header_extension_ids, type,
ContentSource::CS_REMOTE)) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Failed to setup SDES crypto parameters.");
}
sdes_transport_->CacheRtpAbsSendTimeHeaderExtension(
jsep_description.rtp_abs_sendtime_extn_id);
} else if (dtls_srtp_transport_) {
RTC_DCHECK(!unencrypted_rtp_transport_);
RTC_DCHECK(!sdes_transport_);
dtls_srtp_transport_->UpdateSendEncryptedHeaderExtensionIds(
jsep_description.encrypted_header_extension_ids);
dtls_srtp_transport_->CacheRtpAbsSendTimeHeaderExtension(
jsep_description.rtp_abs_sendtime_extn_id);
}
}
remote_description_.reset(new TgJsepTransportDescription(jsep_description));
RTC_DCHECK(rtp_dtls_transport());
SetRemoteIceParameters(rtp_dtls_transport()->ice_transport());
if (rtcp_dtls_transport()) {
SetRemoteIceParameters(rtcp_dtls_transport()->ice_transport());
}
// If PRANSWER/ANSWER is set, we should decide transport protocol type.
if (type == SdpType::kPrAnswer || type == SdpType::kAnswer) {
error = NegotiateAndSetDtlsParameters(SdpType::kOffer);
NegotiateDatagramTransport(type);
}
if (!error.ok()) {
remote_description_.reset();
return error;
}
return webrtc::RTCError::OK();
}
webrtc::RTCError TgJsepTransport::AddRemoteCandidates(
const Candidates& candidates) {
RTC_DCHECK_RUN_ON(network_thread_);
if (!local_description_ || !remote_description_) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_STATE,
mid() +
" is not ready to use the remote candidate "
"because the local or remote description is "
"not set.");
}
for (const cricket::Candidate& candidate : candidates) {
auto transport =
candidate.component() == cricket::ICE_CANDIDATE_COMPONENT_RTP
? rtp_dtls_transport_
: rtcp_dtls_transport_;
if (!transport) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Candidate has an unknown component: " +
candidate.ToSensitiveString() + " for mid " +
mid());
}
RTC_DCHECK(transport->internal() && transport->internal()->ice_transport());
transport->internal()->ice_transport()->AddRemoteCandidate(candidate);
}
return webrtc::RTCError::OK();
}
void TgJsepTransport::SetNeedsIceRestartFlag() {
rtc::CritScope scope(&accessor_lock_);
if (!needs_ice_restart_) {
needs_ice_restart_ = true;
RTC_LOG(LS_VERBOSE) << "needs-ice-restart flag set for transport " << mid();
}
}
absl::optional<rtc::SSLRole> TgJsepTransport::GetDtlsRole() const {
RTC_DCHECK_RUN_ON(network_thread_);
rtc::CritScope scope(&accessor_lock_);
RTC_DCHECK(rtp_dtls_transport_);
RTC_DCHECK(rtp_dtls_transport_->internal());
rtc::SSLRole dtls_role;
if (!rtp_dtls_transport_->internal()->GetDtlsRole(&dtls_role)) {
return absl::optional<rtc::SSLRole>();
}
return absl::optional<rtc::SSLRole>(dtls_role);
}
absl::optional<OpaqueTransportParameters>
TgJsepTransport::GetTransportParameters() const {
rtc::CritScope scope(&accessor_lock_);
if (!datagram_transport()) {
return absl::nullopt;
}
OpaqueTransportParameters params;
params.parameters = datagram_transport()->GetTransportParameters();
return params;
}
bool TgJsepTransport::GetStats(TransportStats* stats) {
RTC_DCHECK_RUN_ON(network_thread_);
rtc::CritScope scope(&accessor_lock_);
stats->transport_name = mid();
stats->channel_stats.clear();
RTC_DCHECK(rtp_dtls_transport_->internal());
bool ret = GetTransportStats(rtp_dtls_transport_->internal(), stats);
if (rtcp_dtls_transport_) {
RTC_DCHECK(rtcp_dtls_transport_->internal());
ret &= GetTransportStats(rtcp_dtls_transport_->internal(), stats);
}
return ret;
}
webrtc::RTCError TgJsepTransport::VerifyCertificateFingerprint(
const rtc::RTCCertificate* certificate,
const rtc::SSLFingerprint* fingerprint) const {
RTC_DCHECK_RUN_ON(network_thread_);
if (!fingerprint) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"No fingerprint");
}
if (!certificate) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Fingerprint provided but no identity available.");
}
std::unique_ptr<rtc::SSLFingerprint> fp_tmp =
rtc::SSLFingerprint::CreateUnique(fingerprint->algorithm,
*certificate->identity());
RTC_DCHECK(fp_tmp.get() != NULL);
if (*fp_tmp == *fingerprint) {
return webrtc::RTCError::OK();
}
char ss_buf[1024];
rtc::SimpleStringBuilder desc(ss_buf);
desc << "Local fingerprint does not match identity. Expected: ";
desc << fp_tmp->ToString();
desc << " Got: " << fingerprint->ToString();
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
std::string(desc.str()));
}
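// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// VerifyCertificateFingerprint() above recomputes the fingerprint of the local
// certificate with the algorithm named in the SDP and requires an exact match
// against the advertised value. Conceptually this is a byte-for-byte digest
// comparison; a standalone sketch with hypothetical names (assumes <vector>
// and <cstdint> are already available through the file's includes):
namespace {
inline bool FingerprintDigestsMatchSketch(
    const std::vector<uint8_t>& computed,
    const std::vector<uint8_t>& advertised) {
  // Differing algorithms would already yield digests of different lengths.
  return computed == advertised;
}
}  // namespace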
void TgJsepTransport::SetActiveResetSrtpParams(bool active_reset_srtp_params) {
RTC_DCHECK_RUN_ON(network_thread_);
rtc::CritScope scope(&accessor_lock_);
if (dtls_srtp_transport_) {
RTC_LOG(INFO)
<< "Setting active_reset_srtp_params of DtlsSrtpTransport to: "
<< active_reset_srtp_params;
dtls_srtp_transport_->SetActiveResetSrtpParams(active_reset_srtp_params);
}
}
void TgJsepTransport::SetLocalIceParameters(IceTransportInternal* ice_transport) {
RTC_DCHECK_RUN_ON(network_thread_);
RTC_DCHECK(ice_transport);
RTC_DCHECK(local_description_);
ice_transport->SetIceParameters(
local_description_->transport_desc.GetIceParameters());
}
void TgJsepTransport::SetRemoteIceParameters(
IceTransportInternal* ice_transport) {
RTC_DCHECK_RUN_ON(network_thread_);
RTC_DCHECK(ice_transport);
RTC_DCHECK(remote_description_);
ice_transport->SetRemoteIceParameters(
remote_description_->transport_desc.GetIceParameters());
ice_transport->SetRemoteIceMode(remote_description_->transport_desc.ice_mode);
}
webrtc::RTCError TgJsepTransport::SetNegotiatedDtlsParameters(
DtlsTransportInternal* dtls_transport,
absl::optional<rtc::SSLRole> dtls_role,
rtc::SSLFingerprint* remote_fingerprint) {
RTC_DCHECK_RUN_ON(network_thread_);
RTC_DCHECK(dtls_transport);
// Set SSL role. Role must be set before fingerprint is applied, which
// initiates DTLS setup.
if (dtls_role && !dtls_transport->SetDtlsRole(*dtls_role)) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Failed to set SSL role for the transport.");
}
// Apply remote fingerprint.
if (!remote_fingerprint ||
!dtls_transport->SetRemoteFingerprint(
remote_fingerprint->algorithm, remote_fingerprint->digest.cdata(),
remote_fingerprint->digest.size())) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_PARAMETER,
"Failed to apply remote fingerprint.");
}
return webrtc::RTCError::OK();
}
bool TgJsepTransport::SetRtcpMux(bool enable,
webrtc::SdpType type,
ContentSource source) {
RTC_DCHECK_RUN_ON(network_thread_);
bool ret = false;
switch (type) {
case SdpType::kOffer:
ret = rtcp_mux_negotiator_.SetOffer(enable, source);
break;
case SdpType::kPrAnswer:
// This may activate RTCP muxing, but we don't yet destroy the transport
// because the final answer may deactivate it.
ret = rtcp_mux_negotiator_.SetProvisionalAnswer(enable, source);
break;
case SdpType::kAnswer:
ret = rtcp_mux_negotiator_.SetAnswer(enable, source);
if (ret && rtcp_mux_negotiator_.IsActive()) {
ActivateRtcpMux();
}
break;
default:
RTC_NOTREACHED();
}
if (!ret) {
return false;
}
auto transport = rtp_transport();
transport->SetRtcpMuxEnabled(rtcp_mux_negotiator_.IsActive());
return ret;
}
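// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// SetRtcpMux() above follows the usual offer/answer shape: an offer only
// records the proposal, a provisional answer may enable muxing tentatively,
// and only a final answer makes the result definitive (at which point
// ActivateRtcpMux() drops the separate RTCP transport). A toy sketch of that
// state machine, with hypothetical names unrelated to the real RtcpMuxFilter:
namespace {
class RtcpMuxNegotiationSketch {
 public:
  void SetOffer(bool offered) { offered_ = offered; }
  void SetProvisionalAnswer(bool accepted) { active_ = offered_ && accepted; }
  void SetAnswer(bool accepted) {
    active_ = offered_ && accepted;
    concluded_ = true;  // Only a final answer ends the negotiation.
  }
  bool IsActive() const { return active_; }
  bool IsConcluded() const { return concluded_; }

 private:
  bool offered_ = false;
  bool active_ = false;
  bool concluded_ = false;
};
}  // namespace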
void TgJsepTransport::ActivateRtcpMux() {
{
// Don't hold the network_thread_ lock while calling other functions,
// since they might call other functions that call RTC_DCHECK_RUN_ON.
// TODO(https://crbug.com/webrtc/10318): Simplify when possible.
RTC_DCHECK_RUN_ON(network_thread_);
}
{
rtc::CritScope scope(&accessor_lock_);
if (unencrypted_rtp_transport_) {
RTC_DCHECK(!sdes_transport_);
RTC_DCHECK(!dtls_srtp_transport_);
unencrypted_rtp_transport_->SetRtcpPacketTransport(nullptr);
} else if (sdes_transport_) {
RTC_DCHECK(!unencrypted_rtp_transport_);
RTC_DCHECK(!dtls_srtp_transport_);
sdes_transport_->SetRtcpPacketTransport(nullptr);
} else if (dtls_srtp_transport_) {
RTC_DCHECK(dtls_srtp_transport_);
RTC_DCHECK(!unencrypted_rtp_transport_);
RTC_DCHECK(!sdes_transport_);
dtls_srtp_transport_->SetDtlsTransports(rtp_dtls_transport(),
/*rtcp_dtls_transport=*/nullptr);
}
rtcp_dtls_transport_ = nullptr; // Destroy this reference.
}
// Notify the JsepTransportController to update the aggregate states.
SignalRtcpMuxActive();
}
bool TgJsepTransport::SetSdes(const std::vector<CryptoParams>& cryptos,
const std::vector<int>& encrypted_extension_ids,
webrtc::SdpType type,
ContentSource source) {
RTC_DCHECK_RUN_ON(network_thread_);
rtc::CritScope scope(&accessor_lock_);
bool ret = false;
ret = sdes_negotiator_.Process(cryptos, type, source);
if (!ret) {
return ret;
}
if (source == ContentSource::CS_LOCAL) {
recv_extension_ids_ = encrypted_extension_ids;
} else {
send_extension_ids_ = encrypted_extension_ids;
}
// If setting an SDES answer succeeded, apply the negotiated parameters
// to the SRTP transport.
if ((type == SdpType::kPrAnswer || type == SdpType::kAnswer) && ret) {
if (sdes_negotiator_.send_cipher_suite() &&
sdes_negotiator_.recv_cipher_suite()) {
RTC_DCHECK(send_extension_ids_);
RTC_DCHECK(recv_extension_ids_);
ret = sdes_transport_->SetRtpParams(
*(sdes_negotiator_.send_cipher_suite()),
sdes_negotiator_.send_key().data(),
static_cast<int>(sdes_negotiator_.send_key().size()),
*(send_extension_ids_), *(sdes_negotiator_.recv_cipher_suite()),
sdes_negotiator_.recv_key().data(),
static_cast<int>(sdes_negotiator_.recv_key().size()),
*(recv_extension_ids_));
} else {
RTC_LOG(LS_INFO) << "No crypto keys are provided for SDES.";
if (type == SdpType::kAnswer) {
// Explicitly reset the |sdes_transport_| if no crypto param is
// provided in the answer. No need to call |ResetParams()| for
// |sdes_negotiator_| because it resets the params inside |SetAnswer|.
sdes_transport_->ResetParams();
}
}
}
return ret;
}
webrtc::RTCError TgJsepTransport::NegotiateAndSetDtlsParameters(
SdpType local_description_type) {
RTC_DCHECK_RUN_ON(network_thread_);
if (!local_description_ || !remote_description_) {
return webrtc::RTCError(webrtc::RTCErrorType::INVALID_STATE,
"Applying an answer transport description "
"without applying any offer.");
}
std::unique_ptr<rtc::SSLFingerprint> remote_fingerprint;
absl::optional<rtc::SSLRole> negotiated_dtls_role;
rtc::SSLFingerprint* local_fp =
local_description_->transport_desc.identity_fingerprint.get();
rtc::SSLFingerprint* remote_fp =
remote_description_->transport_desc.identity_fingerprint.get();
if (remote_fp && local_fp) {
remote_fingerprint = std::make_unique<rtc::SSLFingerprint>(*remote_fp);
webrtc::RTCError error =
NegotiateDtlsRole(local_description_type,
local_description_->transport_desc.connection_role,
remote_description_->transport_desc.connection_role,
&negotiated_dtls_role);
if (!error.ok()) {
return error;
}
} else if (local_fp && (local_description_type == SdpType::kAnswer)) {
return webrtc::RTCError(
webrtc::RTCErrorType::INVALID_PARAMETER,
"Local fingerprint supplied when caller didn't offer DTLS.");
} else {
// We are not doing DTLS
remote_fingerprint = std::make_unique<rtc::SSLFingerprint>(
"", rtc::ArrayView<const uint8_t>());
}
// Now that we have negotiated everything, push it downward.
// Note that we cache the result so that if we have race conditions
// between future SetRemote/SetLocal invocations and new transport
// creation, we have the negotiation state saved until a new
// negotiation happens.
RTC_DCHECK(rtp_dtls_transport());
webrtc::RTCError error = SetNegotiatedDtlsParameters(
rtp_dtls_transport(), negotiated_dtls_role, remote_fingerprint.get());
if (!error.ok()) {
return error;
}
if (rtcp_dtls_transport()) {
error = SetNegotiatedDtlsParameters(
rtcp_dtls_transport(), negotiated_dtls_role, remote_fingerprint.get());
}
return error;
}
webrtc::RTCError TgJsepTransport::NegotiateDtlsRole(
SdpType local_description_type,
ConnectionRole local_connection_role,
ConnectionRole remote_connection_role,
absl::optional<rtc::SSLRole>* negotiated_dtls_role) {
// From RFC 4145, section-4.1, The following are the values that the
// 'setup' attribute can take in an offer/answer exchange:
// Offer Answer
// ________________
// active passive / holdconn
// passive active / holdconn
// actpass active / passive / holdconn
// holdconn holdconn
//
// Set the role that is most conformant with RFC 5763, Section 5, bullet 1
// The endpoint MUST use the setup attribute defined in [RFC4145].
// The endpoint that is the offerer MUST use the setup attribute
// value of setup:actpass and be prepared to receive a client_hello
// before it receives the answer. The answerer MUST use either a
// setup attribute value of setup:active or setup:passive. Note that
// if the answerer uses setup:passive, then the DTLS handshake will
// not begin until the answerer is received, which adds additional
// latency. setup:active allows the answer and the DTLS handshake to
// occur in parallel. Thus, setup:active is RECOMMENDED. Whichever
// party is active MUST initiate a DTLS handshake by sending a
// ClientHello over each flow (host/port quartet).
// IOW - actpass and passive modes should be treated as server and
// active as client.
bool is_remote_server = false;
if (local_description_type == SdpType::kOffer) {
if (local_connection_role != CONNECTIONROLE_ACTPASS) {
return webrtc::RTCError(
webrtc::RTCErrorType::INVALID_PARAMETER,
"Offerer must use actpass value for setup attribute.");
}
if (remote_connection_role == CONNECTIONROLE_ACTIVE ||
remote_connection_role == CONNECTIONROLE_PASSIVE ||
remote_connection_role == CONNECTIONROLE_NONE) {
is_remote_server = (remote_connection_role == CONNECTIONROLE_PASSIVE);
} else {
return webrtc::RTCError(
webrtc::RTCErrorType::INVALID_PARAMETER,
"Answerer must use either active or passive value "
"for setup attribute.");
}
// If remote is NONE or ACTIVE it will act as client.
} else {
if (remote_connection_role != CONNECTIONROLE_ACTPASS &&
remote_connection_role != CONNECTIONROLE_NONE) {
// Accept a remote role attribute that's not "actpass", but matches the
// current negotiated role. This is allowed by dtls-sdp, though our
// implementation will never generate such an offer as it's not
// recommended.
//
// See https://datatracker.ietf.org/doc/html/draft-ietf-mmusic-dtls-sdp,
// section 5.5.
auto current_dtls_role = GetDtlsRole();
if (!current_dtls_role ||
(*current_dtls_role == rtc::SSL_CLIENT &&
remote_connection_role == CONNECTIONROLE_ACTIVE) ||
(*current_dtls_role == rtc::SSL_SERVER &&
remote_connection_role == CONNECTIONROLE_PASSIVE)) {
return webrtc::RTCError(
webrtc::RTCErrorType::INVALID_PARAMETER,
"Offerer must use actpass value or current negotiated role for "
"setup attribute.");
}
}
if (local_connection_role == CONNECTIONROLE_ACTIVE ||
local_connection_role == CONNECTIONROLE_PASSIVE) {
is_remote_server = (local_connection_role == CONNECTIONROLE_ACTIVE);
} else {
return webrtc::RTCError(
webrtc::RTCErrorType::INVALID_PARAMETER,
"Answerer must use either active or passive value "
"for setup attribute.");
}
// If local is passive, local will act as server.
}
*negotiated_dtls_role =
(is_remote_server ? rtc::SSL_CLIENT : rtc::SSL_SERVER);
return webrtc::RTCError::OK();
}
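// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// The RFC 4145 / RFC 5763 rules implemented above reduce to: the offerer must
// send "actpass", "passive" (and "actpass") behaves as the DTLS server, and
// "active" behaves as the DTLS client. A compact standalone sketch of that
// mapping, using hypothetical enums instead of the cricket/rtc ones:
namespace {
enum class SetupSketch { kActpass, kActive, kPassive };
enum class DtlsRoleSketch { kClient, kServer };

// Returns the DTLS role the local side should take. |we_are_offerer| selects
// whose setup attribute decides the outcome, mirroring the logic above.
inline DtlsRoleSketch NegotiateDtlsRoleSketch(bool we_are_offerer,
                                              SetupSketch local,
                                              SetupSketch remote) {
  const bool remote_is_server =
      we_are_offerer ? (remote == SetupSketch::kPassive)
                     : (local == SetupSketch::kActive);
  return remote_is_server ? DtlsRoleSketch::kClient : DtlsRoleSketch::kServer;
}
}  // namespace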
bool TgJsepTransport::GetTransportStats(DtlsTransportInternal* dtls_transport,
TransportStats* stats) {
RTC_DCHECK_RUN_ON(network_thread_);
rtc::CritScope scope(&accessor_lock_);
RTC_DCHECK(dtls_transport);
TransportChannelStats substats;
if (rtcp_dtls_transport_) {
substats.component = dtls_transport == rtcp_dtls_transport_->internal()
? ICE_CANDIDATE_COMPONENT_RTCP
: ICE_CANDIDATE_COMPONENT_RTP;
} else {
substats.component = ICE_CANDIDATE_COMPONENT_RTP;
}
dtls_transport->GetSslVersionBytes(&substats.ssl_version_bytes);
dtls_transport->GetSrtpCryptoSuite(&substats.srtp_crypto_suite);
dtls_transport->GetSslCipherSuite(&substats.ssl_cipher_suite);
substats.dtls_state = dtls_transport->dtls_state();
if (!dtls_transport->ice_transport()->GetStats(
&substats.ice_transport_stats)) {
return false;
}
stats->channel_stats.push_back(substats);
return true;
}
void TgJsepTransport::NegotiateDatagramTransport(SdpType type) {
RTC_DCHECK(type == SdpType::kAnswer || type == SdpType::kPrAnswer);
rtc::CritScope lock(&accessor_lock_);
if (!datagram_transport_) {
return; // No need to negotiate the use of datagram transport.
}
bool compatible_datagram_transport =
remote_description_->transport_desc.opaque_parameters &&
remote_description_->transport_desc.opaque_parameters ==
local_description_->transport_desc.opaque_parameters;
bool use_datagram_transport_for_media =
compatible_datagram_transport &&
remote_description_->media_alt_protocol ==
remote_description_->transport_desc.opaque_parameters->protocol &&
remote_description_->media_alt_protocol ==
local_description_->media_alt_protocol;
bool use_datagram_transport_for_data =
compatible_datagram_transport &&
remote_description_->data_alt_protocol ==
remote_description_->transport_desc.opaque_parameters->protocol &&
remote_description_->data_alt_protocol ==
local_description_->data_alt_protocol;
RTC_LOG(LS_INFO)
<< "Negotiating datagram transport, use_datagram_transport_for_media="
<< use_datagram_transport_for_media
<< ", use_datagram_transport_for_data=" << use_datagram_transport_for_data
<< " answer type=" << (type == SdpType::kAnswer ? "answer" : "pr_answer");
// A provisional or full answer lets the peer start sending on one of the
// transports.
if (composite_rtp_transport_) {
composite_rtp_transport_->SetSendTransport(
use_datagram_transport_for_media ? datagram_rtp_transport_.get()
: default_rtp_transport());
}
if (composite_data_channel_transport_) {
composite_data_channel_transport_->SetSendTransport(
use_datagram_transport_for_data ? data_channel_transport_
: sctp_data_channel_transport_.get());
}
if (type != SdpType::kAnswer) {
return;
}
if (composite_rtp_transport_) {
if (use_datagram_transport_for_media) {
// Negotiated use of datagram transport for RTP, so remove the
// non-datagram RTP transport.
composite_rtp_transport_->RemoveTransport(default_rtp_transport());
if (unencrypted_rtp_transport_) {
unencrypted_rtp_transport_ = nullptr;
} else if (sdes_transport_) {
sdes_transport_ = nullptr;
} else {
dtls_srtp_transport_ = nullptr;
}
} else {
composite_rtp_transport_->RemoveTransport(datagram_rtp_transport_.get());
datagram_rtp_transport_ = nullptr;
}
}
if (composite_data_channel_transport_) {
if (use_datagram_transport_for_data) {
// Negotiated use of datagram transport for data channels, so remove the
// non-datagram data channel transport.
composite_data_channel_transport_->RemoveTransport(
sctp_data_channel_transport_.get());
sctp_data_channel_transport_ = nullptr;
sctp_transport_ = nullptr;
} else {
composite_data_channel_transport_->RemoveTransport(
data_channel_transport_);
data_channel_transport_ = nullptr;
}
} else if (data_channel_transport_ && !use_datagram_transport_for_data) {
// The datagram transport has been rejected without a fallback. We still
// need to inform the application and delete it.
SignalDataChannelTransportNegotiated(this, nullptr);
data_channel_transport_ = nullptr;
}
if (!use_datagram_transport_for_media && !use_datagram_transport_for_data) {
// Datagram transport is not being used for anything, so clean it up.
datagram_transport_ = nullptr;
}
}
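// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// NegotiateDatagramTransport() above decides whether the datagram transport
// carries media (or data) with a pure predicate over the two descriptions:
// both sides must advertise identical opaque transport parameters, and both
// sides must name the same alt-protocol, which must also match the protocol
// carried in those opaque parameters. A minimal sketch with hypothetical
// field names:
namespace {
struct AltProtocolViewSketch {
  absl::optional<std::string> opaque_protocol;  // from opaque_parameters
  absl::optional<std::string> alt_protocol;     // media_ or data_alt_protocol
};

inline bool UseDatagramTransportSketch(const AltProtocolViewSketch& local,
                                       const AltProtocolViewSketch& remote) {
  const bool compatible = remote.opaque_protocol &&
                          remote.opaque_protocol == local.opaque_protocol;
  return compatible && remote.alt_protocol == *remote.opaque_protocol &&
         remote.alt_protocol == local.alt_protocol;
}
}  // namespace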
} // namespace cricket

View File

@@ -0,0 +1,417 @@
/*
* Copyright 2018 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_PC_JSEP_TRANSPORT_H_
#define TG_PC_JSEP_TRANSPORT_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "absl/types/optional.h"
#include "api/candidate.h"
#include "api/ice_transport_interface.h"
#include "api/jsep.h"
#include "api/transport/datagram_transport_interface.h"
#include "media/sctp/sctp_transport_internal.h"
#include "p2p/base/dtls_transport.h"
#include "p2p/base/p2p_constants.h"
#include "p2p/base/transport_info.h"
#include "pc/composite_data_channel_transport.h"
#include "pc/composite_rtp_transport.h"
#include "pc/dtls_srtp_transport.h"
#include "pc/dtls_transport.h"
#include "pc/rtcp_mux_filter.h"
#include "pc/rtp_transport.h"
#include "pc/sctp_transport.h"
#include "pc/session_description.h"
#include "pc/srtp_filter.h"
#include "pc/srtp_transport.h"
#include "pc/transport_stats.h"
#include "rtc_base/constructor_magic.h"
#include "rtc_base/message_queue.h"
#include "rtc_base/rtc_certificate.h"
#include "rtc_base/ssl_stream_adapter.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "rtc_base/thread_checker.h"
#include "tg_rtp_transport.h"
namespace cricket {
class DtlsTransportInternal;
struct TgJsepTransportDescription {
public:
TgJsepTransportDescription();
TgJsepTransportDescription(
bool rtcp_mux_enabled,
const std::vector<CryptoParams>& cryptos,
const std::vector<int>& encrypted_header_extension_ids,
int rtp_abs_sendtime_extn_id,
const TransportDescription& transport_description,
absl::optional<std::string> media_alt_protocol,
absl::optional<std::string> data_alt_protocol);
TgJsepTransportDescription(const TgJsepTransportDescription& from);
~TgJsepTransportDescription();
TgJsepTransportDescription& operator=(const TgJsepTransportDescription& from);
bool rtcp_mux_enabled = true;
std::vector<CryptoParams> cryptos;
std::vector<int> encrypted_header_extension_ids;
int rtp_abs_sendtime_extn_id = -1;
// TODO(zhihuang): Add the ICE and DTLS related variables and methods from
// TransportDescription and remove this extra layer of abstraction.
TransportDescription transport_desc;
// Alt-protocols that apply to this TgJsepTransport. Presence indicates a
// request to use an alternative protocol for media and/or data. The
// alt-protocol is handled by a datagram transport. If one or both of these
// values are present, TgJsepTransport will attempt to negotiate use of the
// datagram transport for media and/or data.
absl::optional<std::string> media_alt_protocol;
absl::optional<std::string> data_alt_protocol;
};
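// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// A TgJsepTransportDescription is the transport-level slice of one SDP m=
// section. A hedged sketch of populating one field by field, roughly what
// TgJsepTransportController::CreateJsepTransportDescription() does from a
// parsed session description (the concrete values here are made up):
inline TgJsepTransportDescription MakeJsepTransportDescriptionSketch() {
  TgJsepTransportDescription desc;
  desc.rtcp_mux_enabled = true;          // a=rtcp-mux was present
  desc.rtp_abs_sendtime_extn_id = 3;     // hypothetical abs-send-time id
  desc.media_alt_protocol = "datagram";  // hypothetical alt-protocol tag
  // desc.transport_desc would carry the ICE ufrag/pwd and DTLS fingerprint.
  return desc;
}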
// Helper class used by TgJsepTransportController that processes
// TransportDescriptions. A TransportDescription represents the
// transport-specific properties of an SDP m= section, processed according to
// JSEP. Each transport consists of DTLS and ICE transport channels for RTP
// (and possibly RTCP, if rtcp-mux isn't used).
//
// On Threading: TgJsepTransport performs work solely on the network thread, and
// so its methods should only be called on the network thread.
class TgJsepTransport : public sigslot::has_slots<> {
public:
// |mid| is just used for log statements in order to identify the Transport.
// Note that |local_certificate| is allowed to be null since a remote
// description may be set before a local certificate is generated.
TgJsepTransport(
const std::string& mid,
const rtc::scoped_refptr<rtc::RTCCertificate>& local_certificate,
rtc::scoped_refptr<webrtc::IceTransportInterface> ice_transport,
rtc::scoped_refptr<webrtc::IceTransportInterface> rtcp_ice_transport,
std::unique_ptr<webrtc::TgRtpTransport> unencrypted_rtp_transport,
std::unique_ptr<webrtc::SrtpTransport> sdes_transport,
std::unique_ptr<webrtc::DtlsSrtpTransport> dtls_srtp_transport,
std::unique_ptr<webrtc::RtpTransportInternal> datagram_rtp_transport,
std::unique_ptr<DtlsTransportInternal> rtp_dtls_transport,
std::unique_ptr<DtlsTransportInternal> rtcp_dtls_transport,
std::unique_ptr<SctpTransportInternal> sctp_transport,
std::unique_ptr<webrtc::DatagramTransportInterface> datagram_transport,
webrtc::DataChannelTransportInterface* data_channel_transport);
~TgJsepTransport() override;
// Returns the MID of this transport. This is only used for logging.
const std::string& mid() const { return mid_; }
// Must be called before applying local session description.
// Needed in order to verify the local fingerprint.
void SetLocalCertificate(
const rtc::scoped_refptr<rtc::RTCCertificate>& local_certificate) {
RTC_DCHECK_RUN_ON(network_thread_);
local_certificate_ = local_certificate;
}
// Return the local certificate provided by SetLocalCertificate.
rtc::scoped_refptr<rtc::RTCCertificate> GetLocalCertificate() const {
RTC_DCHECK_RUN_ON(network_thread_);
return local_certificate_;
}
webrtc::RTCError SetLocalJsepTransportDescription(
const TgJsepTransportDescription& jsep_description,
webrtc::SdpType type);
// Set the remote TransportDescription to be used by DTLS and ICE channels
// that are part of this Transport.
webrtc::RTCError SetRemoteJsepTransportDescription(
const TgJsepTransportDescription& jsep_description,
webrtc::SdpType type);
webrtc::RTCError AddRemoteCandidates(const Candidates& candidates);
// Set the "needs-ice-restart" flag as described in JSEP. After the flag is
// set, offers should generate new ufrags/passwords until an ICE restart
// occurs.
//
// This and the below method can be called safely from any thread as long as
// SetXTransportDescription is not in progress.
void SetNeedsIceRestartFlag();
// Returns true if the ICE restart flag above was set, and no ICE restart has
// occurred yet for this transport (by applying a local description with
// changed ufrag/password).
bool needs_ice_restart() const {
rtc::CritScope scope(&accessor_lock_);
return needs_ice_restart_;
}
// Returns role if negotiated, or empty absl::optional if it hasn't been
// negotiated yet.
absl::optional<rtc::SSLRole> GetDtlsRole() const;
absl::optional<OpaqueTransportParameters> GetTransportParameters() const;
// TODO(deadbeef): Make this const. See comment in transportcontroller.h.
bool GetStats(TransportStats* stats);
const TgJsepTransportDescription* local_description() const {
RTC_DCHECK_RUN_ON(network_thread_);
return local_description_.get();
}
const TgJsepTransportDescription* remote_description() const {
RTC_DCHECK_RUN_ON(network_thread_);
return remote_description_.get();
}
webrtc::RtpTransportInternal* rtp_transport() const {
rtc::CritScope scope(&accessor_lock_);
if (composite_rtp_transport_) {
return composite_rtp_transport_.get();
} else if (datagram_rtp_transport_) {
return datagram_rtp_transport_.get();
} else {
return default_rtp_transport();
}
}
const DtlsTransportInternal* rtp_dtls_transport() const {
rtc::CritScope scope(&accessor_lock_);
if (rtp_dtls_transport_) {
return rtp_dtls_transport_->internal();
} else {
return nullptr;
}
}
DtlsTransportInternal* rtp_dtls_transport() {
rtc::CritScope scope(&accessor_lock_);
if (rtp_dtls_transport_) {
return rtp_dtls_transport_->internal();
} else {
return nullptr;
}
}
const DtlsTransportInternal* rtcp_dtls_transport() const {
rtc::CritScope scope(&accessor_lock_);
if (rtcp_dtls_transport_) {
return rtcp_dtls_transport_->internal();
} else {
return nullptr;
}
}
DtlsTransportInternal* rtcp_dtls_transport() {
rtc::CritScope scope(&accessor_lock_);
if (rtcp_dtls_transport_) {
return rtcp_dtls_transport_->internal();
} else {
return nullptr;
}
}
rtc::scoped_refptr<webrtc::DtlsTransport> RtpDtlsTransport() {
rtc::CritScope scope(&accessor_lock_);
return rtp_dtls_transport_;
}
rtc::scoped_refptr<webrtc::SctpTransport> SctpTransport() const {
rtc::CritScope scope(&accessor_lock_);
return sctp_transport_;
}
webrtc::DataChannelTransportInterface* data_channel_transport() const {
rtc::CritScope scope(&accessor_lock_);
if (composite_data_channel_transport_) {
return composite_data_channel_transport_.get();
} else if (sctp_data_channel_transport_) {
return sctp_data_channel_transport_.get();
}
return data_channel_transport_;
}
// Returns datagram transport, if available.
webrtc::DatagramTransportInterface* datagram_transport() const {
rtc::CritScope scope(&accessor_lock_);
return datagram_transport_.get();
}
// This is signaled when RTCP-mux becomes active and
// |rtcp_dtls_transport_| is destroyed. The TgJsepTransportController will
// handle the signal and update the aggregate transport states.
sigslot::signal<> SignalRtcpMuxActive;
// Signals that a data channel transport was negotiated and may be used to
// send data. The first parameter is |this|. The second parameter is the
// transport that was negotiated, or null if negotiation rejected the data
// channel transport.
sigslot::signal2<TgJsepTransport*, webrtc::DataChannelTransportInterface*>
SignalDataChannelTransportNegotiated;
// TODO(deadbeef): The methods below are only public for testing. Should make
// them utility functions or objects so they can be tested independently from
// this class.
// Returns an error if the certificate's identity does not match the
// fingerprint, or either is NULL.
webrtc::RTCError VerifyCertificateFingerprint(
const rtc::RTCCertificate* certificate,
const rtc::SSLFingerprint* fingerprint) const;
void SetActiveResetSrtpParams(bool active_reset_srtp_params);
private:
bool SetRtcpMux(bool enable, webrtc::SdpType type, ContentSource source);
void ActivateRtcpMux();
bool SetSdes(const std::vector<CryptoParams>& cryptos,
const std::vector<int>& encrypted_extension_ids,
webrtc::SdpType type,
ContentSource source);
// Negotiates and sets the DTLS parameters based on the current local and
// remote transport description, such as the DTLS role to use, and whether
// DTLS should be activated.
//
// Called when an answer TransportDescription is applied.
webrtc::RTCError NegotiateAndSetDtlsParameters(
webrtc::SdpType local_description_type);
// Negotiates the DTLS role based off the offer and answer as specified by
// RFC 4145, section-4.1. Returns an RTCError if role cannot be determined
// from the local description and remote description.
webrtc::RTCError NegotiateDtlsRole(
webrtc::SdpType local_description_type,
ConnectionRole local_connection_role,
ConnectionRole remote_connection_role,
absl::optional<rtc::SSLRole>* negotiated_dtls_role);
// Pushes down the ICE parameters from the local description, such
// as the ICE ufrag and pwd.
void SetLocalIceParameters(IceTransportInternal* ice);
// Pushes down the ICE parameters from the remote description.
void SetRemoteIceParameters(IceTransportInternal* ice);
// Pushes down the DTLS parameters obtained via negotiation.
webrtc::RTCError SetNegotiatedDtlsParameters(
DtlsTransportInternal* dtls_transport,
absl::optional<rtc::SSLRole> dtls_role,
rtc::SSLFingerprint* remote_fingerprint);
bool GetTransportStats(DtlsTransportInternal* dtls_transport,
TransportStats* stats);
// Deactivates, signals removal, and deletes |composite_rtp_transport_| if the
// current state of negotiation is sufficient to determine which rtp_transport
// and data channel transport to use.
void NegotiateDatagramTransport(webrtc::SdpType type)
RTC_RUN_ON(network_thread_);
// Returns the default (non-datagram) rtp transport, if any.
webrtc::RtpTransportInternal* default_rtp_transport() const
RTC_EXCLUSIVE_LOCKS_REQUIRED(accessor_lock_) {
if (dtls_srtp_transport_) {
return dtls_srtp_transport_.get();
} else if (sdes_transport_) {
return sdes_transport_.get();
} else if (unencrypted_rtp_transport_) {
return unencrypted_rtp_transport_.get();
} else {
return nullptr;
}
}
// Owning thread, for safety checks
const rtc::Thread* const network_thread_;
// Critical scope for fields accessed off-thread
// TODO(https://bugs.webrtc.org/10300): Stop doing this.
rtc::CriticalSection accessor_lock_;
const std::string mid_;
// needs-ice-restart bit as described in JSEP.
bool needs_ice_restart_ RTC_GUARDED_BY(accessor_lock_) = false;
rtc::scoped_refptr<rtc::RTCCertificate> local_certificate_
RTC_GUARDED_BY(network_thread_);
std::unique_ptr<TgJsepTransportDescription> local_description_
RTC_GUARDED_BY(network_thread_);
std::unique_ptr<TgJsepTransportDescription> remote_description_
RTC_GUARDED_BY(network_thread_);
// Ice transport which may be used by any of upper-layer transports (below).
// Owned by TgJsepTransport and guaranteed to outlive the transports below.
const rtc::scoped_refptr<webrtc::IceTransportInterface> ice_transport_;
const rtc::scoped_refptr<webrtc::IceTransportInterface> rtcp_ice_transport_;
// To avoid downcasting and make it type safe, keep three unique pointers for
// different SRTP mode and only one of these is non-nullptr.
std::unique_ptr<webrtc::TgRtpTransport> unencrypted_rtp_transport_
RTC_GUARDED_BY(accessor_lock_);
std::unique_ptr<webrtc::SrtpTransport> sdes_transport_
RTC_GUARDED_BY(accessor_lock_);
std::unique_ptr<webrtc::DtlsSrtpTransport> dtls_srtp_transport_
RTC_GUARDED_BY(accessor_lock_);
// If multiple RTP transports are in use, |composite_rtp_transport_| will be
// passed to callers. This is only valid for offer-only, receive-only
// scenarios, as it is not possible for the composite to correctly choose
// which transport to use for sending.
std::unique_ptr<webrtc::CompositeRtpTransport> composite_rtp_transport_
RTC_GUARDED_BY(accessor_lock_);
rtc::scoped_refptr<webrtc::DtlsTransport> rtp_dtls_transport_
RTC_GUARDED_BY(accessor_lock_);
rtc::scoped_refptr<webrtc::DtlsTransport> rtcp_dtls_transport_
RTC_GUARDED_BY(accessor_lock_);
rtc::scoped_refptr<webrtc::DtlsTransport> datagram_dtls_transport_
RTC_GUARDED_BY(accessor_lock_);
std::unique_ptr<webrtc::DataChannelTransportInterface>
sctp_data_channel_transport_ RTC_GUARDED_BY(accessor_lock_);
rtc::scoped_refptr<webrtc::SctpTransport> sctp_transport_
RTC_GUARDED_BY(accessor_lock_);
SrtpFilter sdes_negotiator_ RTC_GUARDED_BY(network_thread_);
RtcpMuxFilter rtcp_mux_negotiator_ RTC_GUARDED_BY(network_thread_);
// Cache the encrypted header extension IDs for SDES negotiation.
absl::optional<std::vector<int>> send_extension_ids_
RTC_GUARDED_BY(network_thread_);
absl::optional<std::vector<int>> recv_extension_ids_
RTC_GUARDED_BY(network_thread_);
// Optional datagram transport (experimental).
std::unique_ptr<webrtc::DatagramTransportInterface> datagram_transport_
RTC_GUARDED_BY(accessor_lock_);
std::unique_ptr<webrtc::RtpTransportInternal> datagram_rtp_transport_
RTC_GUARDED_BY(accessor_lock_);
// Non-SCTP data channel transport. Set to |datagram_transport_| if that
// transport should be used for data channels. Unset otherwise.
webrtc::DataChannelTransportInterface* data_channel_transport_
RTC_GUARDED_BY(accessor_lock_) = nullptr;
// Composite data channel transport, used during negotiation.
std::unique_ptr<webrtc::CompositeDataChannelTransport>
composite_data_channel_transport_ RTC_GUARDED_BY(accessor_lock_);
RTC_DISALLOW_COPY_AND_ASSIGN(TgJsepTransport);
};
} // namespace cricket
#endif // TG_PC_JSEP_TRANSPORT_H_

File diff suppressed because it is too large

View File

@@ -0,0 +1,478 @@
/*
* Copyright 2017 The WebRTC Project Authors. All rights reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_PC_JSEP_TRANSPORT_CONTROLLER_H_
#define TG_PC_JSEP_TRANSPORT_CONTROLLER_H_
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "api/candidate.h"
#include "api/crypto/crypto_options.h"
#include "api/ice_transport_factory.h"
#include "api/peer_connection_interface.h"
#include "api/rtc_event_log/rtc_event_log.h"
#include "api/transport/media/media_transport_config.h"
#include "media/sctp/sctp_transport_internal.h"
#include "p2p/base/dtls_transport.h"
#include "p2p/base/dtls_transport_factory.h"
#include "p2p/base/p2p_transport_channel.h"
#include "pc/channel.h"
#include "pc/dtls_srtp_transport.h"
#include "pc/dtls_transport.h"
#include "pc/rtp_transport.h"
#include "pc/srtp_transport.h"
#include "rtc_base/async_invoker.h"
#include "rtc_base/constructor_magic.h"
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "tg_jsep_transport.h"
#include "tg_rtp_transport.h"
namespace rtc {
class Thread;
class PacketTransportInternal;
} // namespace rtc
namespace webrtc {
class TgJsepTransportController : public sigslot::has_slots<> {
public:
// Used when the RtpTransport/DtlsTransport of the m= section is changed
// because the section is rejected or BUNDLE is enabled.
class Observer {
public:
virtual ~Observer() {}
// Returns true if media associated with |mid| was successfully set up to be
// demultiplexed on |rtp_transport|. Could return false if two bundled m=
// sections use the same SSRC, for example.
//
// If a data channel transport must be negotiated, |data_channel_transport|
// and |negotiation_state| indicate negotiation status. If
// |data_channel_transport| is null, the data channel transport should not
// be used. Otherwise, the value is a pointer to the transport to be used
// for data channels on |mid|, if any.
//
// The observer should not send data on |data_channel_transport| until
// |negotiation_state| is provisional or final. It should not delete
// |data_channel_transport| or any fallback transport until
// |negotiation_state| is final.
virtual bool OnTransportChanged(
const std::string& mid,
RtpTransportInternal* rtp_transport,
rtc::scoped_refptr<DtlsTransport> dtls_transport,
DataChannelTransportInterface* data_channel_transport) = 0;
};
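// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// Whoever owns the controller implements Observer to learn which transports
// back a given mid once BUNDLE and content rejection are resolved. A hedged
// sketch of a do-nothing implementation (hypothetical class name), kept as a
// comment:
//
//   class NullTransportObserverSketch
//       : public TgJsepTransportController::Observer {
//    public:
//     bool OnTransportChanged(
//         const std::string& mid,
//         RtpTransportInternal* rtp_transport,
//         rtc::scoped_refptr<DtlsTransport> dtls_transport,
//         DataChannelTransportInterface* data_channel_transport) override {
//       // A real observer would re-route |mid|'s channels onto
//       // |rtp_transport| and |data_channel_transport| here.
//       return true;
//     }
//   };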
struct Config {
// If |redetermine_role_on_ice_restart| is true, ICE role is redetermined
// upon setting a local transport description that indicates an ICE
// restart.
bool redetermine_role_on_ice_restart = true;
rtc::SSLProtocolVersion ssl_max_version = rtc::SSL_PROTOCOL_DTLS_12;
// |crypto_options| is used to determine if created DTLS transports
// negotiate GCM crypto suites or not.
webrtc::CryptoOptions crypto_options;
PeerConnectionInterface::BundlePolicy bundle_policy =
PeerConnectionInterface::kBundlePolicyBalanced;
PeerConnectionInterface::RtcpMuxPolicy rtcp_mux_policy =
PeerConnectionInterface::kRtcpMuxPolicyRequire;
bool disable_encryption = false;
bool enable_external_auth = false;
// Used to inject the ICE/DTLS transports created externally.
webrtc::IceTransportFactory* ice_transport_factory = nullptr;
cricket::DtlsTransportFactory* dtls_transport_factory = nullptr;
Observer* transport_observer = nullptr;
// Must be provided and valid for the lifetime of the
// TgJsepTransportController instance.
std::function<void(const rtc::CopyOnWriteBuffer& packet,
int64_t packet_time_us)>
rtcp_handler;
bool active_reset_srtp_params = false;
RtcEventLog* event_log = nullptr;
// Factory for SCTP transports.
cricket::SctpTransportInternalFactory* sctp_factory = nullptr;
// Whether an RtpMediaTransport should be created as default, when no
// MediaTransportFactory is provided.
bool use_rtp_media_transport = false;
// Use encrypted datagram transport to send packets.
bool use_datagram_transport = false;
// Use datagram transport's implementation of data channels instead of SCTP.
bool use_datagram_transport_for_data_channels = false;
// Whether |use_datagram_transport_for_data_channels| applies to outgoing
// calls. If true, |use_datagram_transport_for_data_channels| applies only
// to incoming calls.
bool use_datagram_transport_for_data_channels_receive_only = false;
// Optional media transport factory (experimental). If provided it will be
// used to create datagram_transport (as long as either
// |use_datagram_transport| or
// |use_datagram_transport_for_data_channels| is set to true). However,
// whether it will be used to send / receive audio and video frames instead
// of RTP is determined by |use_datagram_transport|. Note that currently
// datagram_transport co-exists with RTP / RTCP transports and may use the
// same underlying ICE transport.
MediaTransportFactory* media_transport_factory = nullptr;
};
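// --- Illustrative aside (not part of the upstream WebRTC sources) ---
// A hedged sketch of filling the struct in before constructing the controller;
// only the field names and enum values come from the definitions above, while
// the chosen values and |my_observer| are made up:
//
//   TgJsepTransportController::Config config;
//   config.bundle_policy = PeerConnectionInterface::kBundlePolicyMaxBundle;
//   config.rtcp_mux_policy = PeerConnectionInterface::kRtcpMuxPolicyRequire;
//   config.active_reset_srtp_params = true;
//   config.transport_observer = &my_observer;  // hypothetical Observer impl
//   config.rtcp_handler = [](const rtc::CopyOnWriteBuffer& packet,
//                            int64_t packet_time_us) { /* handle RTCP */ };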
// The ICE related events are signaled on the |signaling_thread|.
// All the transport related methods are called on the |network_thread|.
TgJsepTransportController(rtc::Thread* signaling_thread,
rtc::Thread* network_thread,
cricket::PortAllocator* port_allocator,
AsyncResolverFactory* async_resolver_factory,
Config config);
virtual ~TgJsepTransportController();
// The main method to be called; applies a description at the transport
// level, creating/destroying transport objects as needed and updating their
// properties. This includes RTP, DTLS, and ICE (but not SCTP). At least not
// yet; it may make sense to do so in the future.
RTCError SetLocalDescription(SdpType type,
const cricket::SessionDescription* description);
RTCError SetRemoteDescription(SdpType type,
const cricket::SessionDescription* description);
// Get transports to be used for the provided |mid|. If bundling is enabled,
// calling GetRtpTransport for multiple MIDs may yield the same object.
RtpTransportInternal* GetRtpTransport(const std::string& mid) const;
cricket::DtlsTransportInternal* GetDtlsTransport(const std::string& mid);
const cricket::DtlsTransportInternal* GetRtcpDtlsTransport(
const std::string& mid) const;
// Gets the externally sharable version of the DtlsTransport.
rtc::scoped_refptr<webrtc::DtlsTransport> LookupDtlsTransportByMid(
const std::string& mid);
rtc::scoped_refptr<SctpTransport> GetSctpTransport(
const std::string& mid) const;
MediaTransportConfig GetMediaTransportConfig(const std::string& mid) const;
DataChannelTransportInterface* GetDataChannelTransport(
const std::string& mid) const;
/*********************
* ICE-related methods
********************/
// This method is public to allow PeerConnection to update it from
// SetConfiguration.
void SetIceConfig(const cricket::IceConfig& config);
// Set the "needs-ice-restart" flag as described in JSEP. After the flag is
// set, offers should generate new ufrags/passwords until an ICE restart
// occurs.
void SetNeedsIceRestartFlag();
// Returns true if the ICE restart flag above was set, and no ICE restart has
// occurred yet for this transport (by applying a local description with
// changed ufrag/password). If the transport has been deleted as a result of
// bundling, returns false.
bool NeedsIceRestart(const std::string& mid) const;
// Start gathering candidates for any new transports, or transports doing an
// ICE restart.
void MaybeStartGathering();
RTCError AddRemoteCandidates(
const std::string& mid,
const std::vector<cricket::Candidate>& candidates);
RTCError RemoveRemoteCandidates(
const std::vector<cricket::Candidate>& candidates);
/**********************
* DTLS-related methods
*********************/
// Specifies the identity to use in this session.
// Can only be called once.
bool SetLocalCertificate(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate);
rtc::scoped_refptr<rtc::RTCCertificate> GetLocalCertificate(
const std::string& mid) const;
// Caller owns returned certificate chain. This method mainly exists for
// stats reporting.
std::unique_ptr<rtc::SSLCertChain> GetRemoteSSLCertChain(
const std::string& mid) const;
// Get negotiated role, if one has been negotiated.
absl::optional<rtc::SSLRole> GetDtlsRole(const std::string& mid) const;
// TODO(deadbeef): GetStats isn't const because all the way down to
// OpenSSLStreamAdapter, GetSslCipherSuite and GetDtlsSrtpCryptoSuite are not
// const. Fix this.
bool GetStats(const std::string& mid, cricket::TransportStats* stats);
bool initial_offerer() const { return initial_offerer_ && *initial_offerer_; }
void SetActiveResetSrtpParams(bool active_reset_srtp_params);
// Allows overwriting the settings from config. You may set or reset the
// media transport configuration on the jsep transport controller, as long as
// you did not call 'GetMediaTransport' or 'MaybeCreateJsepTransport'. Once
// Jsep transport is created, you can't change this setting.
void SetMediaTransportSettings(
bool use_datagram_transport,
bool use_datagram_transport_for_data_channels,
bool use_datagram_transport_for_data_channels_receive_only);
// TODO(elrello): For now the rollback only removes mid to transport mappings
// and deletes unused transports, but doesn't consider anything more complex.
void RollbackTransportForMids(const std::vector<std::string>& mids);
// Gets the transport parameters for the transport identified by |mid|.
// If |mid| is bundled, returns the parameters for the bundled transport.
// If the transport for |mid| has not been created yet, it may be allocated in
// order to generate transport parameters.
absl::optional<cricket::OpaqueTransportParameters> GetTransportParameters(
const std::string& mid);
// All of these signals are fired on the signaling thread.
// If any transport failed => failed,
// Else if all completed => completed,
// Else if all connected => connected,
// Else => connecting
sigslot::signal1<cricket::IceConnectionState> SignalIceConnectionState;
sigslot::signal1<PeerConnectionInterface::PeerConnectionState>
SignalConnectionState;
sigslot::signal1<PeerConnectionInterface::IceConnectionState>
SignalStandardizedIceConnectionState;
// If all transports done gathering => complete,
// Else if any are gathering => gathering,
// Else => new
sigslot::signal1<cricket::IceGatheringState> SignalIceGatheringState;
// (mid, candidates)
sigslot::signal2<const std::string&, const std::vector<cricket::Candidate>&>
SignalIceCandidatesGathered;
sigslot::signal1<const cricket::IceCandidateErrorEvent&>
SignalIceCandidateError;
sigslot::signal1<const std::vector<cricket::Candidate>&>
SignalIceCandidatesRemoved;
sigslot::signal1<const cricket::CandidatePairChangeEvent&>
SignalIceCandidatePairChanged;
sigslot::signal1<rtc::SSLHandshakeError> SignalDtlsHandshakeError;
private:
RTCError ApplyDescription_n(bool local,
SdpType type,
const cricket::SessionDescription* description);
RTCError ValidateAndMaybeUpdateBundleGroup(
bool local,
SdpType type,
const cricket::SessionDescription* description);
RTCError ValidateContent(const cricket::ContentInfo& content_info);
void HandleRejectedContent(const cricket::ContentInfo& content_info,
const cricket::SessionDescription* description);
bool HandleBundledContent(const cricket::ContentInfo& content_info);
bool SetTransportForMid(const std::string& mid,
cricket::TgJsepTransport* jsep_transport);
void RemoveTransportForMid(const std::string& mid);
cricket::TgJsepTransportDescription CreateJsepTransportDescription(
const cricket::ContentInfo& content_info,
const cricket::TransportInfo& transport_info,
const std::vector<int>& encrypted_extension_ids,
int rtp_abs_sendtime_extn_id,
absl::optional<std::string> media_alt_protocol,
absl::optional<std::string> data_alt_protocol);
absl::optional<std::string> bundled_mid() const {
absl::optional<std::string> bundled_mid;
if (bundle_group_ && bundle_group_->FirstContentName()) {
bundled_mid = *(bundle_group_->FirstContentName());
}
return bundled_mid;
}
bool IsBundled(const std::string& mid) const {
return bundle_group_ && bundle_group_->HasContentName(mid);
}
bool ShouldUpdateBundleGroup(SdpType type,
const cricket::SessionDescription* description);
std::vector<int> MergeEncryptedHeaderExtensionIdsForBundle(
const cricket::SessionDescription* description);
std::vector<int> GetEncryptedHeaderExtensionIds(
const cricket::ContentInfo& content_info);
// Extracts the alt-protocol settings that apply to the bundle group.
RTCError GetAltProtocolsForBundle(
const cricket::SessionDescription* description,
absl::optional<std::string>* media_alt_protocol,
absl::optional<std::string>* data_alt_protocol);
int GetRtpAbsSendTimeHeaderExtensionId(
const cricket::ContentInfo& content_info);
// This method takes the BUNDLE group into account. If the TgJsepTransport is
// destroyed because of BUNDLE, it would return the transport which other
// transports are bundled on (In current implementation, it is the first
// content in the BUNDLE group).
const cricket::TgJsepTransport* GetJsepTransportForMid(
const std::string& mid) const;
cricket::TgJsepTransport* GetJsepTransportForMid(const std::string& mid);
// Get the JsepTransport without considering the BUNDLE group. Return nullptr
// if the JsepTransport is destroyed.
const cricket::TgJsepTransport* GetJsepTransportByName(
const std::string& transport_name) const;
cricket::TgJsepTransport* GetJsepTransportByName(
const std::string& transport_name);
// Creates jsep transport. Noop if transport is already created.
// Transport is created either during SetLocalDescription (|local| == true) or
// during SetRemoteDescription (|local| == false). Passing |local| helps to
// differentiate initiator (caller) from answerer (callee).
RTCError MaybeCreateJsepTransport(
bool local,
const cricket::ContentInfo& content_info,
const cricket::SessionDescription& description);
// Creates datagram transport if config wants to use it, and a=x-mt line is
// present for the current media transport. Returned
// DatagramTransportInterface is not connected, and must be connected to ICE.
// You must call |GenerateOrGetLastMediaTransportOffer| on the caller before
// calling MaybeCreateDatagramTransport.
std::unique_ptr<webrtc::DatagramTransportInterface>
MaybeCreateDatagramTransport(const cricket::ContentInfo& content_info,
const cricket::SessionDescription& description,
bool local);
void MaybeDestroyJsepTransport(const std::string& mid);
void DestroyAllJsepTransports_n();
void SetIceRole_n(cricket::IceRole ice_role);
cricket::IceRole DetermineIceRole(
cricket::TgJsepTransport* jsep_transport,
const cricket::TransportInfo& transport_info,
SdpType type,
bool local);
std::unique_ptr<cricket::DtlsTransportInternal> CreateDtlsTransport(
const cricket::ContentInfo& content_info,
cricket::IceTransportInternal* ice,
DatagramTransportInterface* datagram_transport);
rtc::scoped_refptr<webrtc::IceTransportInterface> CreateIceTransport(
const std::string& transport_name,
bool rtcp);
std::unique_ptr<webrtc::TgRtpTransport> CreateUnencryptedRtpTransport(
const std::string& transport_name,
rtc::PacketTransportInternal* rtp_packet_transport,
rtc::PacketTransportInternal* rtcp_packet_transport);
std::unique_ptr<webrtc::SrtpTransport> CreateSdesTransport(
const std::string& transport_name,
cricket::DtlsTransportInternal* rtp_dtls_transport,
cricket::DtlsTransportInternal* rtcp_dtls_transport);
std::unique_ptr<webrtc::DtlsSrtpTransport> CreateDtlsSrtpTransport(
const std::string& transport_name,
cricket::DtlsTransportInternal* rtp_dtls_transport,
cricket::DtlsTransportInternal* rtcp_dtls_transport);
// Collect all the DtlsTransports, including RTP and RTCP, from the
// JsepTransports. JsepTransportController can iterate all the DtlsTransports
// and update the aggregate states.
std::vector<cricket::DtlsTransportInternal*> GetDtlsTransports();
// Handlers for signals from Transport.
void OnTransportWritableState_n(rtc::PacketTransportInternal* transport);
void OnTransportReceivingState_n(rtc::PacketTransportInternal* transport);
void OnTransportGatheringState_n(cricket::IceTransportInternal* transport);
void OnTransportCandidateGathered_n(cricket::IceTransportInternal* transport,
const cricket::Candidate& candidate);
void OnTransportCandidateError_n(
cricket::IceTransportInternal* transport,
const cricket::IceCandidateErrorEvent& event);
void OnTransportCandidatesRemoved_n(cricket::IceTransportInternal* transport,
const cricket::Candidates& candidates);
void OnTransportRoleConflict_n(cricket::IceTransportInternal* transport);
void OnTransportStateChanged_n(cricket::IceTransportInternal* transport);
void OnTransportCandidatePairChanged_n(
const cricket::CandidatePairChangeEvent& event);
void OnDataChannelTransportNegotiated_n(
cricket::TgJsepTransport* transport,
DataChannelTransportInterface* data_channel_transport);
void UpdateAggregateStates_n();
void OnRtcpPacketReceived_n(rtc::CopyOnWriteBuffer* packet,
int64_t packet_time_us);
void OnDtlsHandshakeError(rtc::SSLHandshakeError error);
rtc::Thread* const signaling_thread_ = nullptr;
rtc::Thread* const network_thread_ = nullptr;
cricket::PortAllocator* const port_allocator_ = nullptr;
AsyncResolverFactory* const async_resolver_factory_ = nullptr;
std::map<std::string, std::unique_ptr<cricket::TgJsepTransport>>
jsep_transports_by_name_;
// This keeps track of the mapping between media section
// (BaseChannel/SctpTransport) and the TgJsepTransport underneath.
std::map<std::string, cricket::TgJsepTransport*> mid_to_transport_;
// Aggregate states for Transports.
// standardized_ice_connection_state_ is intended to replace
// ice_connection_state, see bugs.webrtc.org/9308
cricket::IceConnectionState ice_connection_state_ =
cricket::kIceConnectionConnecting;
PeerConnectionInterface::IceConnectionState
standardized_ice_connection_state_ =
PeerConnectionInterface::kIceConnectionNew;
PeerConnectionInterface::PeerConnectionState combined_connection_state_ =
PeerConnectionInterface::PeerConnectionState::kNew;
cricket::IceGatheringState ice_gathering_state_ = cricket::kIceGatheringNew;
Config config_;
// Early on in the call we don't know if datagram transport is going to be
// used, but we need to get the server-supported parameters to add to an SDP.
// This server datagram transport will be promoted to the used datagram
// transport after the local description is set, and the ownership will be
// transferred to the actual TgJsepTransport. This "offer" datagram transport is
// not created if it's done on the party that provides answer. This offer
// datagram transport is only created once at the beginning of the connection,
// and never again.
std::unique_ptr<DatagramTransportInterface> offer_datagram_transport_ =
nullptr;
const cricket::SessionDescription* local_desc_ = nullptr;
const cricket::SessionDescription* remote_desc_ = nullptr;
absl::optional<bool> initial_offerer_;
absl::optional<cricket::ContentGroup> bundle_group_;
cricket::IceConfig ice_config_;
cricket::IceRole ice_role_ = cricket::ICEROLE_CONTROLLING;
uint64_t ice_tiebreaker_ = rtc::CreateRandomId64();
rtc::scoped_refptr<rtc::RTCCertificate> certificate_;
rtc::AsyncInvoker invoker_;
RTC_DISALLOW_COPY_AND_ASSIGN(TgJsepTransportController);
};
} // namespace webrtc
#endif // TG_PC_JSEP_TRANSPORT_CONTROLLER_H_

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,516 @@
/*
* Copyright 2004 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_peer_connection_factory.h"
#include <memory>
#include <utility>
#include <vector>
#include "api/fec_controller.h"
#include "api/media_stream_proxy.h"
#include "api/media_stream_track_proxy.h"
#include "api/network_state_predictor.h"
#include "api/peer_connection_factory_proxy.h"
#include "api/peer_connection_proxy.h"
#include "api/rtc_event_log/rtc_event_log.h"
#include "api/transport/field_trial_based_config.h"
#include "api/transport/media/media_transport_interface.h"
#include "api/turn_customizer.h"
#include "api/units/data_rate.h"
#include "api/video_track_source_proxy.h"
#include "media/sctp/sctp_transport.h"
#include "p2p/base/basic_packet_socket_factory.h"
#include "p2p/base/default_ice_transport_factory.h"
#include "p2p/client/basic_port_allocator.h"
#include "pc/audio_track.h"
#include "pc/local_audio_source.h"
#include "pc/media_stream.h"
#include "pc/peer_connection.h"
#include "pc/rtp_parameters_conversion.h"
#include "pc/video_track.h"
#include "rtc_base/bind.h"
#include "rtc_base/checks.h"
#include "rtc_base/experiments/field_trial_parser.h"
#include "rtc_base/experiments/field_trial_units.h"
#include "rtc_base/numerics/safe_conversions.h"
#include "rtc_base/system/file_wrapper.h"
#include "tg_rtp_data_engine.h"
#include "tg_peer_connection.h"
namespace webrtc {
rtc::scoped_refptr<TgPeerConnectionInterface>
TgPeerConnectionFactoryInterface::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
std::unique_ptr<cricket::PortAllocator> allocator,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
PeerConnectionObserver* observer) {
return nullptr;
}
rtc::scoped_refptr<TgPeerConnectionInterface>
TgPeerConnectionFactoryInterface::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
PeerConnectionDependencies dependencies) {
return nullptr;
}
RtpCapabilities TgPeerConnectionFactoryInterface::GetRtpSenderCapabilities(
cricket::MediaType kind) const {
return {};
}
RtpCapabilities TgPeerConnectionFactoryInterface::GetRtpReceiverCapabilities(
cricket::MediaType kind) const {
return {};
}
BEGIN_SIGNALING_PROXY_MAP(TgPeerConnection)
PROXY_SIGNALING_THREAD_DESTRUCTOR()
PROXY_METHOD0(rtc::scoped_refptr<StreamCollectionInterface>, local_streams)
PROXY_METHOD0(rtc::scoped_refptr<StreamCollectionInterface>, remote_streams)
PROXY_METHOD1(bool, AddStream, MediaStreamInterface*)
PROXY_METHOD1(void, RemoveStream, MediaStreamInterface*)
PROXY_METHOD2(RTCErrorOr<rtc::scoped_refptr<RtpSenderInterface>>,
AddTrack,
rtc::scoped_refptr<MediaStreamTrackInterface>,
const std::vector<std::string>&)
PROXY_METHOD1(bool, RemoveTrack, RtpSenderInterface*)
PROXY_METHOD1(RTCError, RemoveTrackNew, rtc::scoped_refptr<RtpSenderInterface>)
PROXY_METHOD1(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
AddTransceiver,
rtc::scoped_refptr<MediaStreamTrackInterface>)
PROXY_METHOD2(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
AddTransceiver,
rtc::scoped_refptr<MediaStreamTrackInterface>,
const RtpTransceiverInit&)
PROXY_METHOD1(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
AddTransceiver,
cricket::MediaType)
PROXY_METHOD2(RTCErrorOr<rtc::scoped_refptr<RtpTransceiverInterface>>,
AddTransceiver,
cricket::MediaType,
const RtpTransceiverInit&)
PROXY_METHOD2(rtc::scoped_refptr<RtpSenderInterface>,
CreateSender,
const std::string&,
const std::string&)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpSenderInterface>>,
GetSenders)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpReceiverInterface>>,
GetReceivers)
PROXY_CONSTMETHOD0(std::vector<rtc::scoped_refptr<RtpTransceiverInterface>>,
GetTransceivers)
PROXY_METHOD0(void, ClearStatsCache)
PROXY_METHOD2(rtc::scoped_refptr<DataChannelInterface>,
CreateDataChannel,
const std::string&,
const DataChannelInit*)
PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, local_description)
PROXY_CONSTMETHOD0(const SessionDescriptionInterface*, remote_description)
PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
current_local_description)
PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
current_remote_description)
PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
pending_local_description)
PROXY_CONSTMETHOD0(const SessionDescriptionInterface*,
pending_remote_description)
PROXY_METHOD0(void, RestartIce)
PROXY_METHOD2(void,
CreateOffer,
CreateSessionDescriptionObserver*,
const PeerConnectionInterface::RTCOfferAnswerOptions&)
PROXY_METHOD2(void,
CreateAnswer,
CreateSessionDescriptionObserver*,
const PeerConnectionInterface::RTCOfferAnswerOptions&)
PROXY_METHOD2(void,
SetLocalDescription,
SetSessionDescriptionObserver*,
SessionDescriptionInterface*)
PROXY_METHOD1(void, SetLocalDescription, SetSessionDescriptionObserver*)
PROXY_METHOD2(void,
SetRemoteDescription,
SetSessionDescriptionObserver*,
SessionDescriptionInterface*)
PROXY_METHOD2(void,
SetRemoteDescription,
std::unique_ptr<SessionDescriptionInterface>,
rtc::scoped_refptr<SetRemoteDescriptionObserverInterface>)
PROXY_METHOD0(PeerConnectionInterface::RTCConfiguration, GetConfiguration)
PROXY_METHOD1(RTCError,
SetConfiguration,
const PeerConnectionInterface::RTCConfiguration&)
PROXY_METHOD1(bool, AddIceCandidate, const IceCandidateInterface*)
PROXY_METHOD2(void,
AddIceCandidate,
std::unique_ptr<IceCandidateInterface>,
std::function<void(RTCError)>)
PROXY_METHOD1(bool, RemoveIceCandidates, const std::vector<cricket::Candidate>&)
PROXY_METHOD1(RTCError, SetBitrate, const BitrateSettings&)
PROXY_METHOD1(void, SetAudioPlayout, bool)
PROXY_METHOD1(void, SetAudioRecording, bool)
PROXY_METHOD1(rtc::scoped_refptr<DtlsTransportInterface>,
LookupDtlsTransportByMid,
const std::string&)
PROXY_CONSTMETHOD0(rtc::scoped_refptr<SctpTransportInterface>, GetSctpTransport)
PROXY_METHOD0(PeerConnectionInterface::SignalingState, signaling_state)
PROXY_METHOD0(PeerConnectionInterface::IceConnectionState, ice_connection_state)
PROXY_METHOD0(PeerConnectionInterface::IceConnectionState, standardized_ice_connection_state)
PROXY_METHOD0(PeerConnectionInterface::PeerConnectionState, peer_connection_state)
PROXY_METHOD0(PeerConnectionInterface::IceGatheringState, ice_gathering_state)
PROXY_METHOD2(bool,
StartRtcEventLog,
std::unique_ptr<RtcEventLogOutput>,
int64_t)
PROXY_METHOD1(bool, StartRtcEventLog, std::unique_ptr<RtcEventLogOutput>)
PROXY_METHOD0(void, StopRtcEventLog)
PROXY_METHOD0(void, Close)
END_PROXY_MAP()
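// Note (not part of the original diff): the proxy map above generates
// TgPeerConnectionProxy (used by TgPeerConnectionFactory::CreatePeerConnection
// below), which marshals every call onto the signaling thread and destroys the
// wrapped TgPeerConnection there.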
TgPeerConnectionFactory::TgPeerConnectionFactory(
PeerConnectionFactoryDependencies dependencies)
: wraps_current_thread_(false),
network_thread_(dependencies.network_thread),
worker_thread_(dependencies.worker_thread),
signaling_thread_(dependencies.signaling_thread),
task_queue_factory_(std::move(dependencies.task_queue_factory)),
media_engine_(std::move(dependencies.media_engine)),
call_factory_(std::move(dependencies.call_factory)),
event_log_factory_(std::move(dependencies.event_log_factory)),
fec_controller_factory_(std::move(dependencies.fec_controller_factory)),
network_state_predictor_factory_(
std::move(dependencies.network_state_predictor_factory)),
injected_network_controller_factory_(
std::move(dependencies.network_controller_factory)),
media_transport_factory_(std::move(dependencies.media_transport_factory)),
neteq_factory_(std::move(dependencies.neteq_factory)),
trials_(dependencies.trials ? std::move(dependencies.trials)
: std::make_unique<FieldTrialBasedConfig>()) {
if (!network_thread_) {
owned_network_thread_ = rtc::Thread::CreateWithSocketServer();
owned_network_thread_->SetName("pc_network_thread", nullptr);
owned_network_thread_->Start();
network_thread_ = owned_network_thread_.get();
}
if (!worker_thread_) {
owned_worker_thread_ = rtc::Thread::Create();
owned_worker_thread_->SetName("pc_worker_thread", nullptr);
owned_worker_thread_->Start();
worker_thread_ = owned_worker_thread_.get();
}
if (!signaling_thread_) {
signaling_thread_ = rtc::Thread::Current();
if (!signaling_thread_) {
// If this thread isn't already wrapped by an rtc::Thread, create a
// wrapper and own it in this class.
signaling_thread_ = rtc::ThreadManager::Instance()->WrapCurrentThread();
wraps_current_thread_ = true;
}
}
options_.disable_encryption = true;
}
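// Note (assumption, not part of the original diff): disable_encryption above
// switches off WebRTC's built-in SRTP for connections made by this factory;
// the Telegram call code presumably layers its own encryption over the
// transport instead.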
TgPeerConnectionFactory::~TgPeerConnectionFactory() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_.reset(nullptr);
// Make sure |worker_thread_| and |signaling_thread_| outlive
// |default_socket_factory_| and |default_network_manager_|.
default_socket_factory_ = nullptr;
default_network_manager_ = nullptr;
if (wraps_current_thread_)
rtc::ThreadManager::Instance()->UnwrapCurrentThread();
}
bool TgPeerConnectionFactory::Initialize() {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::InitRandom(rtc::Time32());
default_network_manager_.reset(new rtc::BasicNetworkManager());
if (!default_network_manager_) {
return false;
}
default_socket_factory_.reset(
new rtc::BasicPacketSocketFactory(network_thread_));
if (!default_socket_factory_) {
return false;
}
channel_manager_ = std::make_unique<cricket::ChannelManager>(
std::move(media_engine_), std::make_unique<cricket::TgRtpDataEngine>(),
worker_thread_, network_thread_);
channel_manager_->SetVideoRtxEnabled(true);
if (!channel_manager_->Init()) {
return false;
}
return true;
}
void TgPeerConnectionFactory::SetOptions(const PeerConnectionFactory::Options& options) {
options_ = options;
}
RtpCapabilities TgPeerConnectionFactory::GetRtpSenderCapabilities(
cricket::MediaType kind) const {
RTC_DCHECK_RUN_ON(signaling_thread_);
switch (kind) {
case cricket::MEDIA_TYPE_AUDIO: {
cricket::AudioCodecs cricket_codecs;
cricket::RtpHeaderExtensions cricket_extensions;
channel_manager_->GetSupportedAudioSendCodecs(&cricket_codecs);
channel_manager_->GetSupportedAudioRtpHeaderExtensions(
&cricket_extensions);
return ToRtpCapabilities(cricket_codecs, cricket_extensions);
}
case cricket::MEDIA_TYPE_VIDEO: {
cricket::VideoCodecs cricket_codecs;
cricket::RtpHeaderExtensions cricket_extensions;
channel_manager_->GetSupportedVideoCodecs(&cricket_codecs);
channel_manager_->GetSupportedVideoRtpHeaderExtensions(
&cricket_extensions);
return ToRtpCapabilities(cricket_codecs, cricket_extensions);
}
case cricket::MEDIA_TYPE_DATA:
return RtpCapabilities();
}
// Not reached; avoids compile warning.
FATAL();
}
RtpCapabilities TgPeerConnectionFactory::GetRtpReceiverCapabilities(
cricket::MediaType kind) const {
RTC_DCHECK_RUN_ON(signaling_thread_);
switch (kind) {
case cricket::MEDIA_TYPE_AUDIO: {
cricket::AudioCodecs cricket_codecs;
cricket::RtpHeaderExtensions cricket_extensions;
channel_manager_->GetSupportedAudioReceiveCodecs(&cricket_codecs);
channel_manager_->GetSupportedAudioRtpHeaderExtensions(
&cricket_extensions);
return ToRtpCapabilities(cricket_codecs, cricket_extensions);
}
case cricket::MEDIA_TYPE_VIDEO: {
cricket::VideoCodecs cricket_codecs;
cricket::RtpHeaderExtensions cricket_extensions;
channel_manager_->GetSupportedVideoCodecs(&cricket_codecs);
channel_manager_->GetSupportedVideoRtpHeaderExtensions(
&cricket_extensions);
return ToRtpCapabilities(cricket_codecs, cricket_extensions);
}
case cricket::MEDIA_TYPE_DATA:
return RtpCapabilities();
}
// Not reached; avoids compile warning.
FATAL();
}
rtc::scoped_refptr<AudioSourceInterface>
TgPeerConnectionFactory::CreateAudioSource(const cricket::AudioOptions& options) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<LocalAudioSource> source(
LocalAudioSource::Create(&options));
return source;
}
bool TgPeerConnectionFactory::StartAecDump(FILE* file, int64_t max_size_bytes) {
RTC_DCHECK(signaling_thread_->IsCurrent());
return channel_manager_->StartAecDump(FileWrapper(file), max_size_bytes);
}
void TgPeerConnectionFactory::StopAecDump() {
RTC_DCHECK(signaling_thread_->IsCurrent());
channel_manager_->StopAecDump();
}
rtc::scoped_refptr<TgPeerConnectionInterface>
TgPeerConnectionFactory::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
std::unique_ptr<cricket::PortAllocator> allocator,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
PeerConnectionObserver* observer) {
// Convert the legacy API into the new dependency structure.
PeerConnectionDependencies dependencies(observer);
dependencies.allocator = std::move(allocator);
dependencies.cert_generator = std::move(cert_generator);
// Pass that into the new API.
return CreatePeerConnection(configuration, std::move(dependencies));
}
rtc::scoped_refptr<TgPeerConnectionInterface>
TgPeerConnectionFactory::CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
PeerConnectionDependencies dependencies) {
RTC_DCHECK(signaling_thread_->IsCurrent());
RTC_DCHECK(!(dependencies.allocator && dependencies.packet_socket_factory))
<< "You can't set both allocator and packet_socket_factory; "
"the former is going away (see bugs.webrtc.org/7447";
// Set internal defaults if optional dependencies are not set.
if (!dependencies.cert_generator) {
dependencies.cert_generator =
std::make_unique<rtc::RTCCertificateGenerator>(signaling_thread_,
network_thread_);
}
if (!dependencies.allocator) {
rtc::PacketSocketFactory* packet_socket_factory;
if (dependencies.packet_socket_factory)
packet_socket_factory = dependencies.packet_socket_factory.get();
else
packet_socket_factory = default_socket_factory_.get();
network_thread_->Invoke<void>(RTC_FROM_HERE, [this, &configuration,
&dependencies,
&packet_socket_factory]() {
dependencies.allocator = std::make_unique<cricket::BasicPortAllocator>(
default_network_manager_.get(), packet_socket_factory,
configuration.turn_customizer);
});
}
if (!dependencies.ice_transport_factory) {
dependencies.ice_transport_factory =
std::make_unique<DefaultIceTransportFactory>();
}
// TODO(zstein): Once chromium injects its own AsyncResolverFactory, set
// |dependencies.async_resolver_factory| to a new
// |rtc::BasicAsyncResolverFactory| if no factory is provided.
network_thread_->Invoke<void>(
RTC_FROM_HERE,
rtc::Bind(&cricket::PortAllocator::SetNetworkIgnoreMask,
dependencies.allocator.get(), options_.network_ignore_mask));
std::unique_ptr<RtcEventLog> event_log =
worker_thread_->Invoke<std::unique_ptr<RtcEventLog>>(
RTC_FROM_HERE,
rtc::Bind(&TgPeerConnectionFactory::CreateRtcEventLog_w, this));
std::unique_ptr<Call> call = worker_thread_->Invoke<std::unique_ptr<Call>>(
RTC_FROM_HERE,
rtc::Bind(&TgPeerConnectionFactory::CreateCall_w, this, event_log.get()));
rtc::scoped_refptr<TgPeerConnection> pc(
new rtc::RefCountedObject<TgPeerConnection>(this, std::move(event_log),
std::move(call)));
if (!pc->Initialize(configuration, std::move(dependencies))) {
return nullptr;
}
return TgPeerConnectionProxy::Create(signaling_thread(), pc);
}
rtc::scoped_refptr<MediaStreamInterface>
TgPeerConnectionFactory::CreateLocalMediaStream(const std::string& stream_id) {
RTC_DCHECK(signaling_thread_->IsCurrent());
return MediaStreamProxy::Create(signaling_thread_,
MediaStream::Create(stream_id));
}
rtc::scoped_refptr<VideoTrackInterface> TgPeerConnectionFactory::CreateVideoTrack(
const std::string& id,
VideoTrackSourceInterface* source) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<VideoTrackInterface> track(
VideoTrack::Create(id, source, worker_thread_));
return VideoTrackProxy::Create(signaling_thread_, worker_thread_, track);
}
rtc::scoped_refptr<AudioTrackInterface> TgPeerConnectionFactory::CreateAudioTrack(
const std::string& id,
AudioSourceInterface* source) {
RTC_DCHECK(signaling_thread_->IsCurrent());
rtc::scoped_refptr<AudioTrackInterface> track(AudioTrack::Create(id, source));
return AudioTrackProxy::Create(signaling_thread_, track);
}
std::unique_ptr<cricket::SctpTransportInternalFactory>
TgPeerConnectionFactory::CreateSctpTransportInternalFactory() {
#ifdef HAVE_SCTP
return std::make_unique<cricket::SctpTransportFactory>(network_thread());
#else
return nullptr;
#endif
}
cricket::ChannelManager* TgPeerConnectionFactory::channel_manager() {
return channel_manager_.get();
}
std::unique_ptr<RtcEventLog> TgPeerConnectionFactory::CreateRtcEventLog_w() {
RTC_DCHECK_RUN_ON(worker_thread_);
auto encoding_type = RtcEventLog::EncodingType::Legacy;
if (IsTrialEnabled("WebRTC-RtcEventLogNewFormat"))
encoding_type = RtcEventLog::EncodingType::NewFormat;
return event_log_factory_
? event_log_factory_->CreateRtcEventLog(encoding_type)
: std::make_unique<RtcEventLogNull>();
}
std::unique_ptr<Call> TgPeerConnectionFactory::CreateCall_w(
RtcEventLog* event_log) {
RTC_DCHECK_RUN_ON(worker_thread_);
webrtc::Call::Config call_config(event_log);
if (!channel_manager_->media_engine() || !call_factory_) {
return nullptr;
}
call_config.audio_state =
channel_manager_->media_engine()->voice().GetAudioState();
FieldTrialParameter<DataRate> min_bandwidth("min", DataRate::kbps(30));
FieldTrialParameter<DataRate> start_bandwidth("start", DataRate::kbps(300));
FieldTrialParameter<DataRate> max_bandwidth("max", DataRate::kbps(2000));
ParseFieldTrial({&min_bandwidth, &start_bandwidth, &max_bandwidth},
trials_->Lookup("WebRTC-PcFactoryDefaultBitrates"));
call_config.bitrate_config.min_bitrate_bps =
rtc::saturated_cast<int>(min_bandwidth->bps());
call_config.bitrate_config.start_bitrate_bps =
rtc::saturated_cast<int>(start_bandwidth->bps());
call_config.bitrate_config.max_bitrate_bps =
rtc::saturated_cast<int>(max_bandwidth->bps());
call_config.fec_controller_factory = fec_controller_factory_.get();
call_config.task_queue_factory = task_queue_factory_.get();
call_config.network_state_predictor_factory =
network_state_predictor_factory_.get();
call_config.neteq_factory = neteq_factory_.get();
if (IsTrialEnabled("WebRTC-Bwe-InjectedCongestionController")) {
RTC_LOG(LS_INFO) << "Using injected network controller factory";
call_config.network_controller_factory =
injected_network_controller_factory_.get();
} else {
RTC_LOG(LS_INFO) << "Using default network controller factory";
}
call_config.trials = trials_.get();
return std::unique_ptr<Call>(call_factory_->CreateCall(call_config));
}
bool TgPeerConnectionFactory::IsTrialEnabled(absl::string_view key) const {
RTC_DCHECK(trials_);
return trials_->Lookup(key).find("Enabled") == 0;
}
} // namespace webrtc
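// Illustrative usage sketch (not part of the original diff): how a caller that
// already holds a TgPeerConnectionFactoryInterface might create a connection.
// The function name, the Unified Plan choice and the assumption that the
// factory was built elsewhere are placeholders, not Telegram's actual wiring.
rtc::scoped_refptr<webrtc::TgPeerConnectionInterface> SketchCreateConnection(
    rtc::scoped_refptr<webrtc::TgPeerConnectionFactoryInterface> factory,
    webrtc::PeerConnectionObserver* observer) {
  webrtc::PeerConnectionInterface::RTCConfiguration config;
  config.sdp_semantics = webrtc::SdpSemantics::kUnifiedPlan;
  // Allocator, certificate generator and ICE transport factory are left unset,
  // so CreatePeerConnection() above falls back to its internal defaults.
  webrtc::PeerConnectionDependencies dependencies(observer);
  return factory->CreatePeerConnection(config, std::move(dependencies));
}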

View File

@@ -0,0 +1,261 @@
/*
* Copyright 2011 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_PC_PEER_CONNECTION_FACTORY_H_
#define TG_PC_PEER_CONNECTION_FACTORY_H_
#include <memory>
#include <string>
#include "api/media_stream_interface.h"
#include "api/peer_connection_interface.h"
#include "api/scoped_refptr.h"
#include "api/transport/media/media_transport_interface.h"
#include "media/sctp/sctp_transport_internal.h"
#include "pc/channel_manager.h"
#include "rtc_base/rtc_certificate_generator.h"
#include "rtc_base/thread.h"
#include "pc/peer_connection_factory.h"
namespace rtc {
class BasicNetworkManager;
class BasicPacketSocketFactory;
} // namespace rtc
namespace webrtc {
class RtcEventLog;
class TgPeerConnection;
class TgPeerConnectionInterface;
class RTC_EXPORT TgPeerConnectionFactoryInterface
: public rtc::RefCountInterface {
public:
// Set the options to be used for subsequently created PeerConnections.
virtual void SetOptions(const PeerConnectionFactoryInterface::Options& options) = 0;
// The preferred way to create a new peer connection. Simply provide the
// configuration and a PeerConnectionDependencies structure.
// TODO(benwright): Make pure virtual once downstream mock PC factory classes
// are updated.
virtual rtc::scoped_refptr<TgPeerConnectionInterface> CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
PeerConnectionDependencies dependencies);
// Deprecated; |allocator| and |cert_generator| may be null, in which case
// default implementations will be used.
//
// |observer| must not be null.
//
// Note that this method does not take ownership of |observer|; it's the
// responsibility of the caller to delete it. It can be safely deleted after
// Close has been called on the returned PeerConnection, which ensures no
// more observer callbacks will be invoked.
virtual rtc::scoped_refptr<TgPeerConnectionInterface> CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
std::unique_ptr<cricket::PortAllocator> allocator,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
PeerConnectionObserver* observer);
// Returns the capabilities of an RTP sender of type |kind|.
// If for some reason you pass in MEDIA_TYPE_DATA, returns an empty structure.
// TODO(orphis): Make pure virtual when all subclasses implement it.
virtual RtpCapabilities GetRtpSenderCapabilities(
cricket::MediaType kind) const;
// Returns the capabilities of an RTP receiver of type |kind|.
// If for some reason you pass in MEDIA_TYPE_DATA, returns an empty structure.
// TODO(orphis): Make pure virtual when all subclasses implement it.
virtual RtpCapabilities GetRtpReceiverCapabilities(
cricket::MediaType kind) const;
virtual rtc::scoped_refptr<MediaStreamInterface> CreateLocalMediaStream(
const std::string& stream_id) = 0;
// Creates an AudioSourceInterface.
// |options| decides audio processing settings.
virtual rtc::scoped_refptr<AudioSourceInterface> CreateAudioSource(
const cricket::AudioOptions& options) = 0;
// Creates a new local VideoTrack. The same |source| can be used in several
// tracks.
virtual rtc::scoped_refptr<VideoTrackInterface> CreateVideoTrack(
const std::string& label,
VideoTrackSourceInterface* source) = 0;
// Creates a new AudioTrack. At the moment |source| can be null.
virtual rtc::scoped_refptr<AudioTrackInterface> CreateAudioTrack(
const std::string& label,
AudioSourceInterface* source) = 0;
// Starts AEC dump using existing file. Takes ownership of |file| and passes
// it on to VoiceEngine (via other objects) immediately, which will take
// the ownership. If the operation fails, the file will be closed.
// A maximum file size in bytes can be specified. When the file size limit is
// reached, logging is stopped automatically. If max_size_bytes is set to a
// value <= 0, no limit will be used, and logging will continue until the
// StopAecDump function is called.
// TODO(webrtc:6463): Delete default implementation when downstream mocks
// classes are updated.
virtual bool StartAecDump(FILE* file, int64_t max_size_bytes) {
return false;
}
// Stops logging the AEC dump.
virtual void StopAecDump() = 0;
protected:
// Dtor and ctor protected as objects shouldn't be created or deleted via
// this interface.
TgPeerConnectionFactoryInterface() {}
~TgPeerConnectionFactoryInterface() override = default;
};
class TgPeerConnectionFactory: public TgPeerConnectionFactoryInterface {
public:
void SetOptions(const PeerConnectionFactoryInterface::Options& options);
rtc::scoped_refptr<TgPeerConnectionInterface> CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
std::unique_ptr<cricket::PortAllocator> allocator,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
PeerConnectionObserver* observer);
rtc::scoped_refptr<TgPeerConnectionInterface> CreatePeerConnection(
const PeerConnectionInterface::RTCConfiguration& configuration,
PeerConnectionDependencies dependencies);
bool Initialize();
RtpCapabilities GetRtpSenderCapabilities(
cricket::MediaType kind) const;
RtpCapabilities GetRtpReceiverCapabilities(
cricket::MediaType kind) const;
rtc::scoped_refptr<MediaStreamInterface> CreateLocalMediaStream(
const std::string& stream_id);
rtc::scoped_refptr<AudioSourceInterface> CreateAudioSource(
const cricket::AudioOptions& options);
rtc::scoped_refptr<VideoTrackInterface> CreateVideoTrack(
const std::string& id,
VideoTrackSourceInterface* video_source);
rtc::scoped_refptr<AudioTrackInterface> CreateAudioTrack(
const std::string& id,
AudioSourceInterface* audio_source);
bool StartAecDump(FILE* file, int64_t max_size_bytes);
void StopAecDump();
virtual std::unique_ptr<cricket::SctpTransportInternalFactory>
CreateSctpTransportInternalFactory();
virtual cricket::ChannelManager* channel_manager();
rtc::Thread* signaling_thread() {
// This method can be called on a different thread when the factory is
// created in CreatePeerConnectionFactory().
return signaling_thread_;
}
rtc::Thread* worker_thread() { return worker_thread_; }
rtc::Thread* network_thread() { return network_thread_; }
const PeerConnectionFactoryInterface::Options& options() const { return options_; }
MediaTransportFactory* media_transport_factory() {
return media_transport_factory_.get();
}
protected:
// This structure allows simple management of all new dependencies being added
// to the PeerConnectionFactory.
explicit TgPeerConnectionFactory(
PeerConnectionFactoryDependencies dependencies);
// Hook to let testing framework insert actions between
// "new RTCPeerConnection" and "pc.Initialize"
virtual void ActionsBeforeInitializeForTesting(PeerConnectionInterface*) {}
virtual ~TgPeerConnectionFactory();
private:
bool IsTrialEnabled(absl::string_view key) const;
std::unique_ptr<RtcEventLog> CreateRtcEventLog_w();
std::unique_ptr<Call> CreateCall_w(RtcEventLog* event_log);
bool wraps_current_thread_;
rtc::Thread* network_thread_;
rtc::Thread* worker_thread_;
rtc::Thread* signaling_thread_;
std::unique_ptr<rtc::Thread> owned_network_thread_;
std::unique_ptr<rtc::Thread> owned_worker_thread_;
const std::unique_ptr<TaskQueueFactory> task_queue_factory_;
PeerConnectionFactoryInterface::Options options_;
std::unique_ptr<cricket::ChannelManager> channel_manager_;
std::unique_ptr<rtc::BasicNetworkManager> default_network_manager_;
std::unique_ptr<rtc::BasicPacketSocketFactory> default_socket_factory_;
std::unique_ptr<cricket::MediaEngineInterface> media_engine_;
std::unique_ptr<webrtc::CallFactoryInterface> call_factory_;
std::unique_ptr<RtcEventLogFactoryInterface> event_log_factory_;
std::unique_ptr<FecControllerFactoryInterface> fec_controller_factory_;
std::unique_ptr<NetworkStatePredictorFactoryInterface>
network_state_predictor_factory_;
std::unique_ptr<NetworkControllerFactoryInterface>
injected_network_controller_factory_;
std::unique_ptr<MediaTransportFactory> media_transport_factory_;
std::unique_ptr<NetEqFactory> neteq_factory_;
const std::unique_ptr<WebRtcKeyValueConfig> trials_;
};
BEGIN_SIGNALING_PROXY_MAP(TgPeerConnectionFactory)
PROXY_SIGNALING_THREAD_DESTRUCTOR()
PROXY_METHOD1(void, SetOptions, const PeerConnectionFactory::Options&)
PROXY_METHOD4(rtc::scoped_refptr<TgPeerConnectionInterface>,
CreatePeerConnection,
const PeerConnectionInterface::RTCConfiguration&,
std::unique_ptr<cricket::PortAllocator>,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface>,
PeerConnectionObserver*)
PROXY_METHOD2(rtc::scoped_refptr<TgPeerConnectionInterface>,
CreatePeerConnection,
const PeerConnectionInterface::RTCConfiguration&,
PeerConnectionDependencies)
PROXY_CONSTMETHOD1(webrtc::RtpCapabilities,
GetRtpSenderCapabilities,
cricket::MediaType)
PROXY_CONSTMETHOD1(webrtc::RtpCapabilities,
GetRtpReceiverCapabilities,
cricket::MediaType)
PROXY_METHOD1(rtc::scoped_refptr<MediaStreamInterface>,
CreateLocalMediaStream,
const std::string&)
PROXY_METHOD1(rtc::scoped_refptr<AudioSourceInterface>,
CreateAudioSource,
const cricket::AudioOptions&)
PROXY_METHOD2(rtc::scoped_refptr<VideoTrackInterface>,
CreateVideoTrack,
const std::string&,
VideoTrackSourceInterface*)
PROXY_METHOD2(rtc::scoped_refptr<AudioTrackInterface>,
CreateAudioTrack,
const std::string&,
AudioSourceInterface*)
PROXY_METHOD2(bool, StartAecDump, FILE*, int64_t)
PROXY_METHOD0(void, StopAecDump)
END_PROXY_MAP()
} // namespace webrtc
#endif // TG_PC_PEER_CONNECTION_FACTORY_H_

View File

@@ -0,0 +1,330 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_rtp_data_engine.h"
#include <map>
#include "absl/strings/match.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/base/rtp_utils.h"
#include "media/base/stream_params.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/data_rate_limiter.h"
#include "rtc_base/helpers.h"
#include "rtc_base/logging.h"
#include "rtc_base/sanitizer.h"
namespace cricket {
// We want to avoid IP fragmentation.
static const size_t kDataMaxRtpPacketLen = 1200U;
// We reserve space after the RTP header for future wiggle room.
static const unsigned char kReservedSpace[] = {0x00, 0x00, 0x00, 0x00};
// Amount of overhead SRTP may take. We need to leave room in the
// buffer for it, otherwise SRTP will fail later. If SRTP ever uses
// more than this, we need to increase this number.
static const size_t kMaxSrtpHmacOverhead = 16;
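// Illustrative note (not part of the original diff): together with the usual
// 12-byte minimum RTP header (cricket::kMinRtpPacketLen), the constants above
// leave 1200 - 12 - 4 - 16 = 1168 bytes of payload per packet; SendData()
// below drops anything larger.
static const size_t kMaxRtpDataPayloadLen =
    kDataMaxRtpPacketLen - kMinRtpPacketLen - sizeof(kReservedSpace) -
    kMaxSrtpHmacOverhead;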
TgRtpDataEngine::TgRtpDataEngine() {
data_codecs_.push_back(
DataCodec(kGoogleRtpDataCodecPlType, kGoogleRtpDataCodecName));
}
DataMediaChannel* TgRtpDataEngine::CreateChannel(const MediaConfig& config) {
return new TgRtpDataMediaChannel(config);
}
static const DataCodec* FindCodecByName(const std::vector<DataCodec>& codecs,
const std::string& name) {
for (const DataCodec& codec : codecs) {
if (absl::EqualsIgnoreCase(name, codec.name))
return &codec;
}
return nullptr;
}
TgRtpDataMediaChannel::TgRtpDataMediaChannel(const MediaConfig& config)
: DataMediaChannel(config) {
Construct();
SetPreferredDscp(rtc::DSCP_AF41);
}
void TgRtpDataMediaChannel::Construct() {
sending_ = false;
receiving_ = false;
send_limiter_.reset(new rtc::DataRateLimiter(kDataMaxBandwidth / 8, 1.0));
}
TgRtpDataMediaChannel::~TgRtpDataMediaChannel() {
std::map<uint32_t, RtpClock*>::const_iterator iter;
for (iter = rtp_clock_by_send_ssrc_.begin();
iter != rtp_clock_by_send_ssrc_.end(); ++iter) {
delete iter->second;
}
}
const DataCodec* TgFindUnknownCodec(const std::vector<DataCodec>& codecs) {
DataCodec data_codec(kGoogleRtpDataCodecPlType, kGoogleRtpDataCodecName);
std::vector<DataCodec>::const_iterator iter;
for (iter = codecs.begin(); iter != codecs.end(); ++iter) {
if (!iter->Matches(data_codec)) {
return &(*iter);
}
}
return NULL;
}
const DataCodec* TgFindKnownCodec(const std::vector<DataCodec>& codecs) {
DataCodec data_codec(kGoogleRtpDataCodecPlType, kGoogleRtpDataCodecName);
std::vector<DataCodec>::const_iterator iter;
for (iter = codecs.begin(); iter != codecs.end(); ++iter) {
if (iter->Matches(data_codec)) {
return &(*iter);
}
}
return NULL;
}
bool TgRtpDataMediaChannel::SetRecvCodecs(const std::vector<DataCodec>& codecs) {
const DataCodec* unknown_codec = TgFindUnknownCodec(codecs);
if (unknown_codec) {
RTC_LOG(LS_WARNING) << "Failed to SetRecvCodecs because of unknown codec: "
<< unknown_codec->ToString();
return false;
}
recv_codecs_ = codecs;
return true;
}
bool TgRtpDataMediaChannel::SetSendCodecs(const std::vector<DataCodec>& codecs) {
const DataCodec* known_codec = TgFindKnownCodec(codecs);
if (!known_codec) {
RTC_LOG(LS_WARNING)
<< "Failed to SetSendCodecs because there is no known codec.";
return false;
}
send_codecs_ = codecs;
return true;
}
bool TgRtpDataMediaChannel::SetSendParameters(const DataSendParameters& params) {
return (SetSendCodecs(params.codecs) &&
SetMaxSendBandwidth(params.max_bandwidth_bps));
}
bool TgRtpDataMediaChannel::SetRecvParameters(const DataRecvParameters& params) {
return SetRecvCodecs(params.codecs);
}
bool TgRtpDataMediaChannel::AddSendStream(const StreamParams& stream) {
if (!stream.has_ssrcs()) {
return false;
}
if (GetStreamBySsrc(send_streams_, stream.first_ssrc())) {
RTC_LOG(LS_WARNING) << "Not adding data send stream '" << stream.id
<< "' with ssrc=" << stream.first_ssrc()
<< " because stream already exists.";
return false;
}
send_streams_.push_back(stream);
// TODO(pthatcher): This should be per-stream, not per-ssrc.
// And we should probably allow more than one per stream.
rtp_clock_by_send_ssrc_[stream.first_ssrc()] =
new RtpClock(kDataCodecClockrate, rtc::CreateRandomNonZeroId(),
rtc::CreateRandomNonZeroId());
RTC_LOG(LS_INFO) << "Added data send stream '" << stream.id
<< "' with ssrc=" << stream.first_ssrc();
return true;
}
bool TgRtpDataMediaChannel::RemoveSendStream(uint32_t ssrc) {
if (!GetStreamBySsrc(send_streams_, ssrc)) {
return false;
}
RemoveStreamBySsrc(&send_streams_, ssrc);
delete rtp_clock_by_send_ssrc_[ssrc];
rtp_clock_by_send_ssrc_.erase(ssrc);
return true;
}
bool TgRtpDataMediaChannel::AddRecvStream(const StreamParams& stream) {
if (!stream.has_ssrcs()) {
return false;
}
if (GetStreamBySsrc(recv_streams_, stream.first_ssrc())) {
RTC_LOG(LS_WARNING) << "Not adding data recv stream '" << stream.id
<< "' with ssrc=" << stream.first_ssrc()
<< " because stream already exists.";
return false;
}
recv_streams_.push_back(stream);
RTC_LOG(LS_INFO) << "Added data recv stream '" << stream.id
<< "' with ssrc=" << stream.first_ssrc();
return true;
}
bool TgRtpDataMediaChannel::RemoveRecvStream(uint32_t ssrc) {
RemoveStreamBySsrc(&recv_streams_, ssrc);
return true;
}
// Not implemented.
void TgRtpDataMediaChannel::ResetUnsignaledRecvStream() {}
void TgRtpDataMediaChannel::OnPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t /* packet_time_us */) {
RtpHeader header;
if (!GetRtpHeader(packet.cdata(), packet.size(), &header)) {
return;
}
size_t header_length;
if (!GetRtpHeaderLen(packet.cdata(), packet.size(), &header_length)) {
return;
}
const char* data =
packet.cdata<char>() + header_length + sizeof(kReservedSpace);
size_t data_len = packet.size() - header_length - sizeof(kReservedSpace);
if (!receiving_) {
RTC_LOG(LS_WARNING) << "Not receiving packet " << header.ssrc << ":"
<< header.seq_num << " before SetReceive(true) called.";
return;
}
if (!FindCodecById(recv_codecs_, header.payload_type)) {
return;
}
if (!GetStreamBySsrc(recv_streams_, header.ssrc)) {
RTC_LOG(LS_WARNING) << "Received packet for unknown ssrc: " << header.ssrc;
return;
}
// Uncomment this for easy debugging.
// const auto* found_stream = GetStreamBySsrc(recv_streams_, header.ssrc);
// RTC_LOG(LS_INFO) << "Received packet"
// << " groupid=" << found_stream.groupid
// << ", ssrc=" << header.ssrc
// << ", seqnum=" << header.seq_num
// << ", timestamp=" << header.timestamp
// << ", len=" << data_len;
ReceiveDataParams params;
params.ssrc = header.ssrc;
params.seq_num = header.seq_num;
params.timestamp = header.timestamp;
SignalDataReceived(params, data, data_len);
}
bool TgRtpDataMediaChannel::SetMaxSendBandwidth(int bps) {
if (bps <= 0) {
bps = kDataMaxBandwidth;
}
send_limiter_.reset(new rtc::DataRateLimiter(bps / 8, 1.0));
RTC_LOG(LS_INFO) << "TgRtpDataMediaChannel::SetSendBandwidth to " << bps
<< "bps.";
return true;
}
bool TgRtpDataMediaChannel::SendData(const SendDataParams& params,
const rtc::CopyOnWriteBuffer& payload,
SendDataResult* result) {
if (result) {
// If we return true, we'll set this to SDR_SUCCESS.
*result = SDR_ERROR;
}
if (!sending_) {
RTC_LOG(LS_WARNING) << "Not sending packet with ssrc=" << params.ssrc
<< " len=" << payload.size()
<< " before SetSend(true).";
return false;
}
if (params.type != cricket::DMT_TEXT) {
RTC_LOG(LS_WARNING)
<< "Not sending data because binary type is unsupported.";
return false;
}
const StreamParams* found_stream =
GetStreamBySsrc(send_streams_, params.ssrc);
if (!found_stream) {
RTC_LOG(LS_WARNING) << "Not sending data because ssrc is unknown: "
<< params.ssrc;
return false;
}
const DataCodec* found_codec =
FindCodecByName(send_codecs_, kGoogleRtpDataCodecName);
if (!found_codec) {
RTC_LOG(LS_WARNING) << "Not sending data because codec is unknown: "
<< kGoogleRtpDataCodecName;
return false;
}
size_t packet_len = (kMinRtpPacketLen + sizeof(kReservedSpace) +
payload.size() + kMaxSrtpHmacOverhead);
if (packet_len > kDataMaxRtpPacketLen) {
return false;
}
double now =
rtc::TimeMicros() / static_cast<double>(rtc::kNumMicrosecsPerSec);
if (!send_limiter_->CanUse(packet_len, now)) {
RTC_LOG(LS_VERBOSE) << "Dropped data packet of len=" << packet_len
<< "; already sent " << send_limiter_->used_in_period()
<< "/" << send_limiter_->max_per_period();
return false;
}
RtpHeader header;
header.payload_type = found_codec->id;
header.ssrc = params.ssrc;
rtp_clock_by_send_ssrc_[header.ssrc]->Tick(now, &header.seq_num,
&header.timestamp);
rtc::CopyOnWriteBuffer packet(kMinRtpPacketLen, packet_len);
if (!SetRtpHeader(packet.data(), packet.size(), header)) {
return false;
}
packet.AppendData(kReservedSpace);
packet.AppendData(payload);
RTC_LOG(LS_VERBOSE) << "Sent RTP data packet: "
<< " stream=" << found_stream->id
<< " ssrc=" << header.ssrc
<< ", seqnum=" << header.seq_num
<< ", timestamp=" << header.timestamp
<< ", len=" << payload.size();
rtc::PacketOptions options;
options.info_signaled_after_sent.packet_type = rtc::PacketType::kData;
MediaChannel::SendPacket(&packet, options);
send_limiter_->Use(packet_len, now);
if (result) {
*result = SDR_SUCCESS;
}
return true;
}
} // namespace cricket

View File

@@ -0,0 +1,109 @@
/*
* Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_MEDIA_BASE_RTP_DATA_ENGINE_H_
#define TG_MEDIA_BASE_RTP_DATA_ENGINE_H_
#include <map>
#include <memory>
#include <string>
#include <vector>
#include "media/base/codec.h"
#include "media/base/media_channel.h"
#include "media/base/media_constants.h"
#include "media/base/media_engine.h"
namespace rtc {
class DataRateLimiter;
}
namespace cricket {
class TgRtpDataEngine : public DataEngineInterface {
public:
TgRtpDataEngine();
virtual DataMediaChannel* CreateChannel(const MediaConfig& config);
virtual const std::vector<DataCodec>& data_codecs() { return data_codecs_; }
private:
std::vector<DataCodec> data_codecs_;
};
// Keep track of sequence number and timestamp of an RTP stream. The
// sequence number starts with a "random" value and increments. The
// timestamp starts with a "random" value and increases monotonically
// according to the clockrate.
class RtpClock {
public:
RtpClock(int clockrate, uint16_t first_seq_num, uint32_t timestamp_offset)
: clockrate_(clockrate),
last_seq_num_(first_seq_num),
timestamp_offset_(timestamp_offset) {}
// Given the current time (in seconds, which must be monotonically
// increasing), returns the next sequence number and timestamp.
void Tick(double now, int* seq_num, uint32_t* timestamp);
private:
int clockrate_;
uint16_t last_seq_num_;
uint32_t timestamp_offset_;
};
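// Illustrative sketch (not part of the original diff): what Tick() produces.
// Assuming the usual 90 kHz data-codec clock rate, two calls 20 ms apart yield
// consecutive sequence numbers and timestamps roughly 1800 ticks apart.
inline void SketchRtpClockUsage() {
  RtpClock clock(/*clockrate=*/90000, /*first_seq_num=*/1,
                 /*timestamp_offset=*/0);
  int seq_num = 0;
  uint32_t timestamp = 0;
  clock.Tick(/*now=*/10.000, &seq_num, &timestamp);  // e.g. seq 2, ts 900000
  clock.Tick(/*now=*/10.020, &seq_num, &timestamp);  // e.g. seq 3, ts 901800
}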
class TgRtpDataMediaChannel : public DataMediaChannel {
public:
explicit TgRtpDataMediaChannel(const MediaConfig& config);
virtual ~TgRtpDataMediaChannel();
virtual bool SetSendParameters(const DataSendParameters& params);
virtual bool SetRecvParameters(const DataRecvParameters& params);
virtual bool AddSendStream(const StreamParams& sp);
virtual bool RemoveSendStream(uint32_t ssrc);
virtual bool AddRecvStream(const StreamParams& sp);
virtual bool RemoveRecvStream(uint32_t ssrc);
virtual void ResetUnsignaledRecvStream();
virtual bool SetSend(bool send) {
sending_ = send;
return true;
}
virtual bool SetReceive(bool receive) {
receiving_ = receive;
return true;
}
virtual void OnPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us);
virtual void OnReadyToSend(bool ready) {}
virtual bool SendData(const SendDataParams& params,
const rtc::CopyOnWriteBuffer& payload,
SendDataResult* result);
private:
void Construct();
bool SetMaxSendBandwidth(int bps);
bool SetSendCodecs(const std::vector<DataCodec>& codecs);
bool SetRecvCodecs(const std::vector<DataCodec>& codecs);
bool sending_;
bool receiving_;
std::vector<DataCodec> send_codecs_;
std::vector<DataCodec> recv_codecs_;
std::vector<StreamParams> send_streams_;
std::vector<StreamParams> recv_streams_;
std::map<uint32_t, RtpClock*> rtp_clock_by_send_ssrc_;
std::unique_ptr<rtc::DataRateLimiter> send_limiter_;
};
} // namespace cricket
#endif // TG_MEDIA_BASE_RTP_DATA_ENGINE_H_

View File

@@ -0,0 +1,357 @@
/*
* Copyright 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_rtp_sender.h"
#include <atomic>
#include <utility>
#include <vector>
#include "api/audio_options.h"
#include "api/media_stream_interface.h"
#include "media/base/media_engine.h"
#include "pc/peer_connection.h"
#include "pc/stats_collector.h"
#include "rtc_base/checks.h"
#include "rtc_base/helpers.h"
#include "rtc_base/location.h"
#include "rtc_base/logging.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
namespace {
// This function is only expected to be called on the signaling thread.
// On the other hand, some test or even production setups may use
// several signaling threads.
int GenerateUniqueId() {
static std::atomic<int> g_unique_id{0};
return ++g_unique_id;
}
// Returns true if a "per-sender" encoding parameter contains a value that isn't
// its default. Currently max_bitrate_bps and bitrate_priority both are
// implemented "per-sender," meaning that these encoding parameters
// are used for the RtpSender as a whole, not for a specific encoding layer.
// This is done by setting these encoding parameters at index 0 of
// RtpParameters.encodings. This function can be used to check if these
// parameters are set at any index other than 0 of RtpParameters.encodings,
// because they are currently unimplemented to be used for a specific encoding
// layer.
bool PerSenderRtpEncodingParameterHasValue(
const RtpEncodingParameters& encoding_params) {
if (encoding_params.bitrate_priority != kDefaultBitratePriority ||
encoding_params.network_priority != kDefaultBitratePriority) {
return true;
}
return false;
}
void RemoveEncodingLayers(const std::vector<std::string>& rids,
std::vector<RtpEncodingParameters>* encodings) {
RTC_DCHECK(encodings);
encodings->erase(
std::remove_if(encodings->begin(), encodings->end(),
[&rids](const RtpEncodingParameters& encoding) {
return absl::c_linear_search(rids, encoding.rid);
}),
encodings->end());
}
RtpParameters RestoreEncodingLayers(
const RtpParameters& parameters,
const std::vector<std::string>& removed_rids,
const std::vector<RtpEncodingParameters>& all_layers) {
RTC_DCHECK_EQ(parameters.encodings.size() + removed_rids.size(),
all_layers.size());
RtpParameters result(parameters);
result.encodings.clear();
size_t index = 0;
for (const RtpEncodingParameters& encoding : all_layers) {
if (absl::c_linear_search(removed_rids, encoding.rid)) {
result.encodings.push_back(encoding);
continue;
}
result.encodings.push_back(parameters.encodings[index++]);
}
return result;
}
} // namespace
// Returns true if any RtpParameters member that isn't implemented contains a
// value.
bool TgUnimplementedRtpParameterHasValue(const RtpParameters& parameters) {
if (!parameters.mid.empty()) {
return true;
}
for (size_t i = 0; i < parameters.encodings.size(); ++i) {
// Encoding parameters that are per-sender should only contain value at
// index 0.
if (i != 0 &&
PerSenderRtpEncodingParameterHasValue(parameters.encodings[i])) {
return true;
}
}
return false;
}
TgLocalAudioSinkAdapter::TgLocalAudioSinkAdapter() : sink_(nullptr) {}
TgLocalAudioSinkAdapter::~TgLocalAudioSinkAdapter() {
rtc::CritScope lock(&lock_);
if (sink_)
sink_->OnClose();
}
void TgLocalAudioSinkAdapter::OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames) {
rtc::CritScope lock(&lock_);
if (sink_) {
sink_->OnData(audio_data, bits_per_sample, sample_rate, number_of_channels,
number_of_frames);
}
}
void TgLocalAudioSinkAdapter::SetSink(cricket::AudioSource::Sink* sink) {
rtc::CritScope lock(&lock_);
RTC_DCHECK(!sink || !sink_);
sink_ = sink;
}
rtc::scoped_refptr<TgAudioRtpSender> TgAudioRtpSender::Create(
rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer) {
return rtc::scoped_refptr<TgAudioRtpSender>(
new rtc::RefCountedObject<TgAudioRtpSender>(worker_thread, id,
set_streams_observer));
}
TgAudioRtpSender::TgAudioRtpSender(rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer)
: RtpSenderBase(worker_thread, id, set_streams_observer),
dtmf_sender_proxy_(DtmfSenderProxy::Create(
rtc::Thread::Current(),
DtmfSender::Create(rtc::Thread::Current(), this))),
sink_adapter_(new TgLocalAudioSinkAdapter()) {}
TgAudioRtpSender::~TgAudioRtpSender() {
// For DtmfSender.
SignalDestroyed();
Stop();
}
bool TgAudioRtpSender::CanInsertDtmf() {
if (!media_channel_) {
RTC_LOG(LS_ERROR) << "CanInsertDtmf: No audio channel exists.";
return false;
}
// Check that this RTP sender is active (description has been applied that
// matches an SSRC to its ID).
if (!ssrc_) {
RTC_LOG(LS_ERROR) << "CanInsertDtmf: Sender does not have SSRC.";
return false;
}
return worker_thread_->Invoke<bool>(
RTC_FROM_HERE, [&] { return voice_media_channel()->CanInsertDtmf(); });
}
bool TgAudioRtpSender::InsertDtmf(int code, int duration) {
if (!media_channel_) {
RTC_LOG(LS_ERROR) << "InsertDtmf: No audio channel exists.";
return false;
}
if (!ssrc_) {
RTC_LOG(LS_ERROR) << "InsertDtmf: Sender does not have SSRC.";
return false;
}
bool success = worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
return voice_media_channel()->InsertDtmf(ssrc_, code, duration);
});
if (!success) {
RTC_LOG(LS_ERROR) << "Failed to insert DTMF to channel.";
}
return success;
}
sigslot::signal0<>* TgAudioRtpSender::GetOnDestroyedSignal() {
return &SignalDestroyed;
}
void TgAudioRtpSender::OnChanged() {
TRACE_EVENT0("webrtc", "TgAudioRtpSender::OnChanged");
RTC_DCHECK(!stopped_);
if (cached_track_enabled_ != track_->enabled()) {
cached_track_enabled_ = track_->enabled();
if (can_send_track()) {
SetSend();
}
}
}
void TgAudioRtpSender::DetachTrack() {
RTC_DCHECK(track_);
audio_track()->RemoveSink(sink_adapter_.get());
}
void TgAudioRtpSender::AttachTrack() {
RTC_DCHECK(track_);
cached_track_enabled_ = track_->enabled();
audio_track()->AddSink(sink_adapter_.get());
}
void TgAudioRtpSender::AddTrackToStats() {
}
void TgAudioRtpSender::RemoveTrackFromStats() {
}
rtc::scoped_refptr<DtmfSenderInterface> TgAudioRtpSender::GetDtmfSender() const {
return dtmf_sender_proxy_;
}
void TgAudioRtpSender::SetSend() {
RTC_DCHECK(!stopped_);
RTC_DCHECK(can_send_track());
if (!media_channel_) {
RTC_LOG(LS_ERROR) << "SetAudioSend: No audio channel exists.";
return;
}
cricket::AudioOptions options;
#if !defined(WEBRTC_CHROMIUM_BUILD) && !defined(WEBRTC_WEBKIT_BUILD)
// TODO(tommi): Remove this hack when we move CreateAudioSource out of
// PeerConnection. This is a bit of a strange way to apply local audio
// options since it is also applied to all streams/channels, local or remote.
if (track_->enabled() && audio_track()->GetSource() &&
!audio_track()->GetSource()->remote()) {
options = audio_track()->GetSource()->options();
}
#endif
// |track_->enabled()| hops to the signaling thread, so call it before we hop
// to the worker thread or else it will deadlock.
bool track_enabled = track_->enabled();
bool success = worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
return voice_media_channel()->SetAudioSend(ssrc_, track_enabled, &options,
sink_adapter_.get());
});
if (!success) {
RTC_LOG(LS_ERROR) << "SetAudioSend: ssrc is incorrect: " << ssrc_;
}
}
void TgAudioRtpSender::ClearSend() {
RTC_DCHECK(ssrc_ != 0);
RTC_DCHECK(!stopped_);
if (!media_channel_) {
RTC_LOG(LS_WARNING) << "ClearAudioSend: No audio channel exists.";
return;
}
cricket::AudioOptions options;
bool success = worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
return voice_media_channel()->SetAudioSend(ssrc_, false, &options, nullptr);
});
if (!success) {
RTC_LOG(LS_WARNING) << "ClearAudioSend: ssrc is incorrect: " << ssrc_;
}
}
rtc::scoped_refptr<TgVideoRtpSender> TgVideoRtpSender::Create(
rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer) {
return rtc::scoped_refptr<TgVideoRtpSender>(
new rtc::RefCountedObject<TgVideoRtpSender>(worker_thread, id,
set_streams_observer));
}
TgVideoRtpSender::TgVideoRtpSender(rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer)
: RtpSenderBase(worker_thread, id, set_streams_observer) {}
TgVideoRtpSender::~TgVideoRtpSender() {
Stop();
}
void TgVideoRtpSender::OnChanged() {
TRACE_EVENT0("webrtc", "TgVideoRtpSender::OnChanged");
RTC_DCHECK(!stopped_);
if (cached_track_content_hint_ != video_track()->content_hint()) {
cached_track_content_hint_ = video_track()->content_hint();
if (can_send_track()) {
SetSend();
}
}
}
void TgVideoRtpSender::AttachTrack() {
RTC_DCHECK(track_);
cached_track_content_hint_ = video_track()->content_hint();
}
rtc::scoped_refptr<DtmfSenderInterface> TgVideoRtpSender::GetDtmfSender() const {
RTC_LOG(LS_ERROR) << "Tried to get DTMF sender from video sender.";
return nullptr;
}
void TgVideoRtpSender::SetSend() {
RTC_DCHECK(!stopped_);
RTC_DCHECK(can_send_track());
if (!media_channel_) {
RTC_LOG(LS_ERROR) << "SetVideoSend: No video channel exists.";
return;
}
cricket::VideoOptions options;
VideoTrackSourceInterface* source = video_track()->GetSource();
if (source) {
options.is_screencast = source->is_screencast();
options.video_noise_reduction = source->needs_denoising();
}
switch (cached_track_content_hint_) {
case VideoTrackInterface::ContentHint::kNone:
break;
case VideoTrackInterface::ContentHint::kFluid:
options.is_screencast = false;
break;
case VideoTrackInterface::ContentHint::kDetailed:
case VideoTrackInterface::ContentHint::kText:
options.is_screencast = true;
break;
}
bool success = worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
return video_media_channel()->SetVideoSend(ssrc_, &options, video_track());
});
RTC_DCHECK(success);
}
void TgVideoRtpSender::ClearSend() {
RTC_DCHECK(ssrc_ != 0);
RTC_DCHECK(!stopped_);
if (!media_channel_) {
RTC_LOG(LS_WARNING) << "SetVideoSend: No video channel exists.";
return;
}
// Allow SetVideoSend to fail since |enable| is false and |source| is null.
// This is the normal case when the underlying media channel has already been
// deleted.
worker_thread_->Invoke<bool>(RTC_FROM_HERE, [&] {
return video_media_channel()->SetVideoSend(ssrc_, nullptr, nullptr);
});
}
} // namespace webrtc
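// Illustrative sketch (not part of the original diff): inserting a DTMF tone
// through an audio sender once a description has been applied and the sender
// has an SSRC. The event code 1 and the 100 ms duration are arbitrary examples.
void SketchInsertDtmfTone(
    rtc::scoped_refptr<webrtc::TgAudioRtpSender> sender) {
  if (sender->CanInsertDtmf()) {
    sender->InsertDtmf(/*code=*/1, /*duration=*/100);
  }
}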

View File

@@ -0,0 +1,176 @@
/*
* Copyright 2015 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
// This file contains classes that implement RtpSenderInterface.
// An RtpSender associates a MediaStreamTrackInterface with an underlying
// transport (provided by AudioProviderInterface/VideoProviderInterface)
#ifndef TG_PC_RTP_SENDER_H_
#define TG_PC_RTP_SENDER_H_
#include <memory>
#include <string>
#include <vector>
#include "api/media_stream_interface.h"
#include "api/rtp_sender_interface.h"
#include "media/base/audio_source.h"
#include "media/base/media_channel.h"
#include "pc/dtmf_sender.h"
#include "rtc_base/critical_section.h"
#include "pc/rtp_sender.h"
namespace webrtc {
class StatsCollector;
bool TgUnimplementedRtpParameterHasValue(const RtpParameters& parameters);
// TgLocalAudioSinkAdapter receives data callback as a sink to the local
// AudioTrack, and passes the data to the sink of AudioSource.
class TgLocalAudioSinkAdapter : public AudioTrackSinkInterface,
public cricket::AudioSource {
public:
TgLocalAudioSinkAdapter();
virtual ~TgLocalAudioSinkAdapter();
private:
// AudioSinkInterface implementation.
void OnData(const void* audio_data,
int bits_per_sample,
int sample_rate,
size_t number_of_channels,
size_t number_of_frames) override;
// cricket::AudioSource implementation.
void SetSink(cricket::AudioSource::Sink* sink) override;
cricket::AudioSource::Sink* sink_;
// Critical section protecting |sink_|.
rtc::CriticalSection lock_;
};
class TgAudioRtpSender : public DtmfProviderInterface, public RtpSenderBase {
public:
// Construct an RtpSender for audio with the given sender ID.
// The sender is initialized with no track to send and no associated streams.
// StatsCollector provided so that Add/RemoveLocalAudioTrack can be called
// at the appropriate times.
// If |set_streams_observer| is not null, it is invoked when SetStreams()
// is called. |set_streams_observer| is not owned by this object. If not
// null, it must be valid at least until this sender becomes stopped.
static rtc::scoped_refptr<TgAudioRtpSender> Create(
rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer);
virtual ~TgAudioRtpSender();
// DtmfSenderProvider implementation.
bool CanInsertDtmf() override;
bool InsertDtmf(int code, int duration) override;
sigslot::signal0<>* GetOnDestroyedSignal() override;
// ObserverInterface implementation.
void OnChanged() override;
cricket::MediaType media_type() const override {
return cricket::MEDIA_TYPE_AUDIO;
}
std::string track_kind() const override {
return MediaStreamTrackInterface::kAudioKind;
}
rtc::scoped_refptr<DtmfSenderInterface> GetDtmfSender() const override;
protected:
TgAudioRtpSender(rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer);
void SetSend() override;
void ClearSend() override;
// Hooks to allow custom logic when tracks are attached and detached.
void AttachTrack() override;
void DetachTrack() override;
void AddTrackToStats() override;
void RemoveTrackFromStats() override;
private:
cricket::VoiceMediaChannel* voice_media_channel() {
return static_cast<cricket::VoiceMediaChannel*>(media_channel_);
}
rtc::scoped_refptr<AudioTrackInterface> audio_track() const {
return rtc::scoped_refptr<AudioTrackInterface>(
static_cast<AudioTrackInterface*>(track_.get()));
}
sigslot::signal0<> SignalDestroyed;
rtc::scoped_refptr<DtmfSenderInterface> dtmf_sender_proxy_;
bool cached_track_enabled_ = false;
// Used to pass the data callback from the |track_| to the other end of
// cricket::AudioSource.
std::unique_ptr<TgLocalAudioSinkAdapter> sink_adapter_;
};
class TgVideoRtpSender : public RtpSenderBase {
public:
// Construct an RtpSender for video with the given sender ID.
// The sender is initialized with no track to send and no associated streams.
// If |set_streams_observer| is not null, it is invoked when SetStreams()
// is called. |set_streams_observer| is not owned by this object. If not
// null, it must be valid at least until this sender becomes stopped.
static rtc::scoped_refptr<TgVideoRtpSender> Create(
rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer);
virtual ~TgVideoRtpSender();
// ObserverInterface implementation
void OnChanged() override;
cricket::MediaType media_type() const override {
return cricket::MEDIA_TYPE_VIDEO;
}
std::string track_kind() const override {
return MediaStreamTrackInterface::kVideoKind;
}
rtc::scoped_refptr<DtmfSenderInterface> GetDtmfSender() const override;
protected:
TgVideoRtpSender(rtc::Thread* worker_thread,
const std::string& id,
SetStreamsObserver* set_streams_observer);
void SetSend() override;
void ClearSend() override;
// Hook to allow custom logic when tracks are attached.
void AttachTrack() override;
private:
cricket::VideoMediaChannel* video_media_channel() {
return static_cast<cricket::VideoMediaChannel*>(media_channel_);
}
rtc::scoped_refptr<VideoTrackInterface> video_track() const {
return rtc::scoped_refptr<VideoTrackInterface>(
static_cast<VideoTrackInterface*>(track_.get()));
}
VideoTrackInterface::ContentHint cached_track_content_hint_ =
VideoTrackInterface::ContentHint::kNone;
};
} // namespace webrtc
#endif // TG_PC_RTP_SENDER_H_

View File

@@ -0,0 +1,292 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_rtp_transport.h"
#include <errno.h>
#include <string>
#include <utility>
#include "api/rtp_headers.h"
#include "api/rtp_parameters.h"
#include "media/base/rtp_utils.h"
#include "modules/rtp_rtcp/source/rtp_packet_received.h"
#include "rtc_base/checks.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/logging.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "rtc_base/trace_event.h"
namespace webrtc {
void TgRtpTransport::SetRtcpMuxEnabled(bool enable) {
rtcp_mux_enabled_ = enable;
MaybeSignalReadyToSend();
}
const std::string& TgRtpTransport::transport_name() const {
return rtp_packet_transport_->transport_name();
}
int TgRtpTransport::SetRtpOption(rtc::Socket::Option opt, int value) {
return rtp_packet_transport_->SetOption(opt, value);
}
int TgRtpTransport::SetRtcpOption(rtc::Socket::Option opt, int value) {
if (rtcp_packet_transport_) {
return rtcp_packet_transport_->SetOption(opt, value);
}
return -1;
}
void TgRtpTransport::SetRtpPacketTransport(
rtc::PacketTransportInternal* new_packet_transport) {
if (new_packet_transport == rtp_packet_transport_) {
return;
}
if (rtp_packet_transport_) {
rtp_packet_transport_->SignalReadyToSend.disconnect(this);
rtp_packet_transport_->SignalReadPacket.disconnect(this);
rtp_packet_transport_->SignalNetworkRouteChanged.disconnect(this);
rtp_packet_transport_->SignalWritableState.disconnect(this);
rtp_packet_transport_->SignalSentPacket.disconnect(this);
// Reset the network route of the old transport.
SignalNetworkRouteChanged(absl::optional<rtc::NetworkRoute>());
}
if (new_packet_transport) {
new_packet_transport->SignalReadyToSend.connect(
this, &TgRtpTransport::OnReadyToSend);
new_packet_transport->SignalReadPacket.connect(this,
&TgRtpTransport::OnReadPacket);
new_packet_transport->SignalNetworkRouteChanged.connect(
this, &TgRtpTransport::OnNetworkRouteChanged);
new_packet_transport->SignalWritableState.connect(
this, &TgRtpTransport::OnWritableState);
new_packet_transport->SignalSentPacket.connect(this,
&TgRtpTransport::OnSentPacket);
// Set the network route for the new transport.
SignalNetworkRouteChanged(new_packet_transport->network_route());
}
rtp_packet_transport_ = new_packet_transport;
// Assumes the transport is ready to send if it is writable. If we are wrong,
// ready to send will be updated the next time we try to send.
SetReadyToSend(false,
rtp_packet_transport_ && rtp_packet_transport_->writable());
}
void TgRtpTransport::SetRtcpPacketTransport(
rtc::PacketTransportInternal* new_packet_transport) {
if (new_packet_transport == rtcp_packet_transport_) {
return;
}
if (rtcp_packet_transport_) {
rtcp_packet_transport_->SignalReadyToSend.disconnect(this);
rtcp_packet_transport_->SignalReadPacket.disconnect(this);
rtcp_packet_transport_->SignalNetworkRouteChanged.disconnect(this);
rtcp_packet_transport_->SignalWritableState.disconnect(this);
rtcp_packet_transport_->SignalSentPacket.disconnect(this);
// Reset the network route of the old transport.
SignalNetworkRouteChanged(absl::optional<rtc::NetworkRoute>());
}
if (new_packet_transport) {
new_packet_transport->SignalReadyToSend.connect(
this, &TgRtpTransport::OnReadyToSend);
new_packet_transport->SignalReadPacket.connect(this,
&TgRtpTransport::OnReadPacket);
new_packet_transport->SignalNetworkRouteChanged.connect(
this, &TgRtpTransport::OnNetworkRouteChanged);
new_packet_transport->SignalWritableState.connect(
this, &TgRtpTransport::OnWritableState);
new_packet_transport->SignalSentPacket.connect(this,
&TgRtpTransport::OnSentPacket);
// Set the network route for the new transport.
SignalNetworkRouteChanged(new_packet_transport->network_route());
}
rtcp_packet_transport_ = new_packet_transport;
// Assumes the transport is ready to send if it is writable. If we are wrong,
// ready to send will be updated the next time we try to send.
SetReadyToSend(true,
rtcp_packet_transport_ && rtcp_packet_transport_->writable());
}
bool TgRtpTransport::IsWritable(bool rtcp) const {
rtc::PacketTransportInternal* transport = rtcp && !rtcp_mux_enabled_
? rtcp_packet_transport_
: rtp_packet_transport_;
return transport && transport->writable();
}
bool TgRtpTransport::SendRtpPacket(rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options,
int flags) {
return SendPacket(false, packet, options, flags);
}
bool TgRtpTransport::SendRtcpPacket(rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options,
int flags) {
return SendPacket(true, packet, options, flags);
}
bool TgRtpTransport::SendPacket(bool rtcp,
rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options,
int flags) {
rtc::PacketTransportInternal* transport = rtcp && !rtcp_mux_enabled_
? rtcp_packet_transport_
: rtp_packet_transport_;
int ret = transport->SendPacket(packet->cdata<char>(), packet->size(),
options, flags);
if (ret != static_cast<int>(packet->size())) {
if (transport->GetError() == ENOTCONN) {
RTC_LOG(LS_WARNING) << "Got ENOTCONN from transport.";
SetReadyToSend(rtcp, false);
}
return false;
}
return true;
}
void TgRtpTransport::UpdateRtpHeaderExtensionMap(
const cricket::RtpHeaderExtensions& header_extensions) {
header_extension_map_ = RtpHeaderExtensionMap(header_extensions);
}
bool TgRtpTransport::RegisterRtpDemuxerSink(const RtpDemuxerCriteria& criteria,
RtpPacketSinkInterface* sink) {
rtp_demuxer_.RemoveSink(sink);
if (!rtp_demuxer_.AddSink(criteria, sink)) {
RTC_LOG(LS_ERROR) << "Failed to register the sink for RTP demuxer.";
return false;
}
return true;
}
bool TgRtpTransport::UnregisterRtpDemuxerSink(RtpPacketSinkInterface* sink) {
if (!rtp_demuxer_.RemoveSink(sink)) {
RTC_LOG(LS_ERROR) << "Failed to unregister the sink for RTP demuxer.";
return false;
}
return true;
}
void TgRtpTransport::DemuxPacket(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us) {
webrtc::RtpPacketReceived parsed_packet(&header_extension_map_);
if (!parsed_packet.Parse(std::move(packet))) {
RTC_LOG(LS_ERROR)
<< "Failed to parse the incoming RTP packet before demuxing. Drop it.";
return;
}
if (packet_time_us != -1) {
parsed_packet.set_arrival_time_ms((packet_time_us + 500) / 1000);
}
if (!rtp_demuxer_.OnRtpPacket(parsed_packet)) {
RTC_LOG(LS_WARNING) << "Failed to demux RTP packet: "
<< RtpDemuxer::DescribePacket(parsed_packet);
}
}
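// A small worked example of the rounding above (illustrative only):
// (packet_time_us + 500) / 1000 converts the microsecond receive timestamp to
// the nearest millisecond instead of truncating, e.g.
//   packet_time_us = 1234499  ->  (1234499 + 500) / 1000 = 1234 ms
//   packet_time_us = 1234500  ->  (1234500 + 500) / 1000 = 1235 ms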
bool TgRtpTransport::IsTransportWritable() {
auto rtcp_packet_transport =
rtcp_mux_enabled_ ? nullptr : rtcp_packet_transport_;
return rtp_packet_transport_ && rtp_packet_transport_->writable() &&
(!rtcp_packet_transport || rtcp_packet_transport->writable());
}
void TgRtpTransport::OnReadyToSend(rtc::PacketTransportInternal* transport) {
SetReadyToSend(transport == rtcp_packet_transport_, true);
}
void TgRtpTransport::OnNetworkRouteChanged(
absl::optional<rtc::NetworkRoute> network_route) {
SignalNetworkRouteChanged(network_route);
}
void TgRtpTransport::OnWritableState(
rtc::PacketTransportInternal* packet_transport) {
RTC_DCHECK(packet_transport == rtp_packet_transport_ ||
packet_transport == rtcp_packet_transport_);
SignalWritableState(IsTransportWritable());
}
void TgRtpTransport::OnSentPacket(rtc::PacketTransportInternal* packet_transport,
const rtc::SentPacket& sent_packet) {
RTC_DCHECK(packet_transport == rtp_packet_transport_ ||
packet_transport == rtcp_packet_transport_);
SignalSentPacket(sent_packet);
}
void TgRtpTransport::OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us) {
DemuxPacket(packet, packet_time_us);
}
void TgRtpTransport::OnRtcpPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us) {
SignalRtcpPacketReceived(&packet, packet_time_us);
}
void TgRtpTransport::OnReadPacket(rtc::PacketTransportInternal* transport,
const char* data,
size_t len,
const int64_t& packet_time_us,
int flags) {
TRACE_EVENT0("webrtc", "TgRtpTransport::OnReadPacket");
// When using RTCP multiplexing we might get RTCP packets on the RTP
// transport. We check the RTP payload type to determine if it is RTCP.
auto array_view = rtc::MakeArrayView(data, len);
cricket::RtpPacketType packet_type = cricket::InferRtpPacketType(array_view);
// Filter out packets that are neither RTP nor RTCP.
if (packet_type == cricket::RtpPacketType::kUnknown) {
return;
}
// Protect ourselves against crazy data.
if (!cricket::IsValidRtpPacketSize(packet_type, len)) {
RTC_LOG(LS_ERROR) << "Dropping incoming "
<< cricket::RtpPacketTypeToString(packet_type)
<< " packet: wrong size=" << len;
return;
}
rtc::CopyOnWriteBuffer packet(data, len);
if (packet_type == cricket::RtpPacketType::kRtcp) {
OnRtcpPacketReceived(std::move(packet), packet_time_us);
} else {
OnRtpPacketReceived(std::move(packet), packet_time_us);
}
}
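// Illustrative sketch (not part of this transport): the classification done by
// cricket::InferRtpPacketType above can be approximated with the RFC 5761 rule
// of inspecting the second byte of the packet. The bounds below are an
// assumption for illustration; the authoritative implementation lives in
// media/base/rtp_utils.
//
//   bool LooksLikeRtcp(rtc::ArrayView<const char> packet) {
//     if (packet.size() < 4) {
//       return false;  // Too short for even a minimal RTCP header.
//     }
//     // RTCP packet types 192-223 map to 64-95 once the RTP marker bit is
//     // masked off; RFC 5761 keeps that payload-type range unused for RTP so
//     // muxed RTP and RTCP can be told apart.
//     const uint8_t pt = static_cast<uint8_t>(packet[1]) & 0x7F;
//     return pt >= 64 && pt <= 95;
//   }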
void TgRtpTransport::SetReadyToSend(bool rtcp, bool ready) {
if (rtcp) {
rtcp_ready_to_send_ = ready;
} else {
rtp_ready_to_send_ = ready;
}
MaybeSignalReadyToSend();
}
void TgRtpTransport::MaybeSignalReadyToSend() {
bool ready_to_send =
rtp_ready_to_send_ && (rtcp_ready_to_send_ || rtcp_mux_enabled_);
if (ready_to_send != ready_to_send_) {
ready_to_send_ = ready_to_send;
SignalReadyToSend(ready_to_send);
}
}
} // namespace webrtc

View File

@@ -0,0 +1,133 @@
/*
* Copyright 2017 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_PC_RTP_TRANSPORT_H_
#define TG_PC_RTP_TRANSPORT_H_
#include <string>
#include "call/rtp_demuxer.h"
#include "modules/rtp_rtcp/include/rtp_header_extension_map.h"
#include "pc/rtp_transport_internal.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
namespace rtc {
class CopyOnWriteBuffer;
struct PacketOptions;
class PacketTransportInternal;
} // namespace rtc
namespace webrtc {
class TgRtpTransport : public RtpTransportInternal {
public:
TgRtpTransport(const TgRtpTransport&) = delete;
TgRtpTransport& operator=(const TgRtpTransport&) = delete;
explicit TgRtpTransport(bool rtcp_mux_enabled)
: rtcp_mux_enabled_(rtcp_mux_enabled) {}
bool rtcp_mux_enabled() const override { return rtcp_mux_enabled_; }
void SetRtcpMuxEnabled(bool enable) override;
const std::string& transport_name() const override;
int SetRtpOption(rtc::Socket::Option opt, int value) override;
int SetRtcpOption(rtc::Socket::Option opt, int value) override;
rtc::PacketTransportInternal* rtp_packet_transport() const {
return rtp_packet_transport_;
}
void SetRtpPacketTransport(rtc::PacketTransportInternal* rtp);
rtc::PacketTransportInternal* rtcp_packet_transport() const {
return rtcp_packet_transport_;
}
void SetRtcpPacketTransport(rtc::PacketTransportInternal* rtcp);
bool IsReadyToSend() const override { return ready_to_send_; }
bool IsWritable(bool rtcp) const override;
bool SendRtpPacket(rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options,
int flags) override;
bool SendRtcpPacket(rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options,
int flags) override;
bool IsSrtpActive() const override { return false; }
void UpdateRtpHeaderExtensionMap(
const cricket::RtpHeaderExtensions& header_extensions) override;
bool RegisterRtpDemuxerSink(const RtpDemuxerCriteria& criteria,
RtpPacketSinkInterface* sink) override;
bool UnregisterRtpDemuxerSink(RtpPacketSinkInterface* sink) override;
protected:
// These methods will be used in the subclasses.
void DemuxPacket(rtc::CopyOnWriteBuffer packet, int64_t packet_time_us);
bool SendPacket(bool rtcp,
rtc::CopyOnWriteBuffer* packet,
const rtc::PacketOptions& options,
int flags);
// Overridden by SrtpTransport.
virtual void OnNetworkRouteChanged(
absl::optional<rtc::NetworkRoute> network_route);
virtual void OnRtpPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us);
virtual void OnRtcpPacketReceived(rtc::CopyOnWriteBuffer packet,
int64_t packet_time_us);
// Overridden by SrtpTransport and DtlsSrtpTransport.
virtual void OnWritableState(rtc::PacketTransportInternal* packet_transport);
private:
void OnReadyToSend(rtc::PacketTransportInternal* transport);
void OnSentPacket(rtc::PacketTransportInternal* packet_transport,
const rtc::SentPacket& sent_packet);
void OnReadPacket(rtc::PacketTransportInternal* transport,
const char* data,
size_t len,
const int64_t& packet_time_us,
int flags);
// Updates "ready to send" for an individual channel and fires
// SignalReadyToSend.
void SetReadyToSend(bool rtcp, bool ready);
void MaybeSignalReadyToSend();
bool IsTransportWritable();
bool rtcp_mux_enabled_;
rtc::PacketTransportInternal* rtp_packet_transport_ = nullptr;
rtc::PacketTransportInternal* rtcp_packet_transport_ = nullptr;
bool ready_to_send_ = false;
bool rtp_ready_to_send_ = false;
bool rtcp_ready_to_send_ = false;
RtpDemuxer rtp_demuxer_;
// Used for identifying the MID for RtpDemuxer.
RtpHeaderExtensionMap header_extension_map_;
};
} // namespace webrtc
#endif // TG_PC_RTP_TRANSPORT_H_

View File

@@ -0,0 +1,501 @@
/*
* Copyright 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#include "tg_webrtc_session_description_factory.h"
#include <stddef.h>
#include <memory>
#include <string>
#include <utility>
#include <vector>
#include "absl/algorithm/container.h"
#include "absl/types/optional.h"
#include "api/jsep.h"
#include "api/jsep_session_description.h"
#include "api/rtc_error.h"
#include "pc/session_description.h"
#include "rtc_base/checks.h"
#include "rtc_base/location.h"
#include "rtc_base/logging.h"
#include "rtc_base/ref_counted_object.h"
#include "rtc_base/ssl_identity.h"
#include "rtc_base/ssl_stream_adapter.h"
#include "rtc_base/string_encode.h"
#include "tg_peer_connection.h"
using cricket::MediaSessionOptions;
using rtc::UniqueRandomIdGenerator;
namespace webrtc {
namespace {
static const char kFailedDueToIdentityFailed[] =
" failed because DTLS identity request failed";
static const char kFailedDueToSessionShutdown[] =
" failed because the session was shut down";
static const uint64_t kInitSessionVersion = 2;
// Check that each sender has a unique ID.
static bool ValidMediaSessionOptions(
const cricket::MediaSessionOptions& session_options) {
std::vector<cricket::SenderOptions> sorted_senders;
for (const cricket::MediaDescriptionOptions& media_description_options :
session_options.media_description_options) {
sorted_senders.insert(sorted_senders.end(),
media_description_options.sender_options.begin(),
media_description_options.sender_options.end());
}
absl::c_sort(sorted_senders, [](const cricket::SenderOptions& sender1,
const cricket::SenderOptions& sender2) {
return sender1.track_id < sender2.track_id;
});
return absl::c_adjacent_find(sorted_senders,
[](const cricket::SenderOptions& sender1,
const cricket::SenderOptions& sender2) {
return sender1.track_id == sender2.track_id;
}) == sorted_senders.end();
}
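// A short worked example of the uniqueness check above (illustrative only):
// with sender track ids {"a", "b", "a"}, sorting yields {"a", "a", "b"} and
// absl::c_adjacent_find stops at the duplicated pair, so the function returns
// false; with all-unique ids adjacent_find reaches the end and it returns true.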
enum {
MSG_CREATE_SESSIONDESCRIPTION_SUCCESS,
MSG_CREATE_SESSIONDESCRIPTION_FAILED,
MSG_USE_CONSTRUCTOR_CERTIFICATE
};
struct CreateSessionDescriptionMsg : public rtc::MessageData {
explicit CreateSessionDescriptionMsg(
webrtc::CreateSessionDescriptionObserver* observer,
RTCError error_in)
: observer(observer), error(std::move(error_in)) {}
rtc::scoped_refptr<webrtc::CreateSessionDescriptionObserver> observer;
RTCError error;
std::unique_ptr<webrtc::SessionDescriptionInterface> description;
};
} // namespace
void TgWebRtcCertificateGeneratorCallback::OnFailure() {
SignalRequestFailed();
}
void TgWebRtcCertificateGeneratorCallback::OnSuccess(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) {
SignalCertificateReady(certificate);
}
// static
void TgWebRtcSessionDescriptionFactory::CopyCandidatesFromSessionDescription(
const SessionDescriptionInterface* source_desc,
const std::string& content_name,
SessionDescriptionInterface* dest_desc) {
if (!source_desc) {
return;
}
const cricket::ContentInfos& contents =
source_desc->description()->contents();
const cricket::ContentInfo* cinfo =
source_desc->description()->GetContentByName(content_name);
if (!cinfo) {
return;
}
size_t mediasection_index = static_cast<size_t>(cinfo - &contents[0]);
const IceCandidateCollection* source_candidates =
source_desc->candidates(mediasection_index);
const IceCandidateCollection* dest_candidates =
dest_desc->candidates(mediasection_index);
if (!source_candidates || !dest_candidates) {
return;
}
for (size_t n = 0; n < source_candidates->count(); ++n) {
const IceCandidateInterface* new_candidate = source_candidates->at(n);
if (!dest_candidates->HasCandidate(new_candidate)) {
dest_desc->AddCandidate(source_candidates->at(n));
}
}
}
TgWebRtcSessionDescriptionFactory::TgWebRtcSessionDescriptionFactory(
rtc::Thread* signaling_thread,
cricket::ChannelManager* channel_manager,
TgPeerConnection* pc,
const std::string& session_id,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate,
UniqueRandomIdGenerator* ssrc_generator)
: signaling_thread_(signaling_thread),
session_desc_factory_(channel_manager,
&transport_desc_factory_,
ssrc_generator),
// RFC 4566 suggested a Network Time Protocol (NTP) format timestamp
// as the session id and session version. To simplify, it should be fine
// to just use a random number as session id and start version from
// |kInitSessionVersion|.
session_version_(kInitSessionVersion),
cert_generator_(std::move(cert_generator)),
pc_(pc),
session_id_(session_id),
certificate_request_state_(CERTIFICATE_NOT_NEEDED) {
RTC_DCHECK(signaling_thread_);
RTC_DCHECK(!(cert_generator_ && certificate));
bool dtls_enabled = cert_generator_ || certificate;
// SRTP-SDES is disabled if DTLS is on.
SetSdesPolicy(dtls_enabled ? cricket::SEC_DISABLED : cricket::SEC_REQUIRED);
if (!dtls_enabled) {
RTC_LOG(LS_VERBOSE) << "DTLS-SRTP disabled.";
return;
}
if (certificate) {
// Use |certificate|.
certificate_request_state_ = CERTIFICATE_WAITING;
RTC_LOG(LS_VERBOSE) << "DTLS-SRTP enabled; has certificate parameter.";
// We already have a certificate, but we defer calling |SetCertificate|; if we
// did it in the constructor, the caller would not have had a chance to connect
// to |SignalCertificateReady| yet.
signaling_thread_->Post(
RTC_FROM_HERE, this, MSG_USE_CONSTRUCTOR_CERTIFICATE,
new rtc::ScopedRefMessageData<rtc::RTCCertificate>(certificate));
} else {
// Generate certificate.
certificate_request_state_ = CERTIFICATE_WAITING;
rtc::scoped_refptr<TgWebRtcCertificateGeneratorCallback> callback(
new rtc::RefCountedObject<TgWebRtcCertificateGeneratorCallback>());
callback->SignalRequestFailed.connect(
this, &TgWebRtcSessionDescriptionFactory::OnCertificateRequestFailed);
callback->SignalCertificateReady.connect(
this, &TgWebRtcSessionDescriptionFactory::SetCertificate);
rtc::KeyParams key_params = rtc::KeyParams();
RTC_LOG(LS_VERBOSE)
<< "DTLS-SRTP enabled; sending DTLS identity request (key type: "
<< key_params.type() << ").";
// Request certificate. This happens asynchronously, so that the caller gets
// a chance to connect to |SignalCertificateReady|.
cert_generator_->GenerateCertificateAsync(key_params, absl::nullopt,
callback);
}
}
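// A minimal sketch of the expected caller wiring (illustrative only; the names
// below are assumptions, not part of this file). Callers connect to
// |SignalCertificateReady| before issuing requests, because CreateOffer and
// CreateAnswer queue their work while |certificate_request_state_| is
// CERTIFICATE_WAITING and only proceed once SetCertificate has run:
//
//   factory->SignalCertificateReady.connect(owner, &Owner::OnCertificateReady);
//   factory->CreateOffer(observer, offer_answer_options, session_options);
//   // The observer's OnSuccess/OnFailure is later posted on the signaling
//   // thread via MSG_CREATE_SESSIONDESCRIPTION_{SUCCESS,FAILED}.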
TgWebRtcSessionDescriptionFactory::~TgWebRtcSessionDescriptionFactory() {
RTC_DCHECK(signaling_thread_->IsCurrent());
// Fail any requests that were asked for before identity generation completed.
FailPendingRequests(kFailedDueToSessionShutdown);
// Process all pending notifications in the message queue. If we don't do
// this, requests will linger and not know they succeeded or failed.
rtc::MessageList list;
signaling_thread_->Clear(this, rtc::MQID_ANY, &list);
for (auto& msg : list) {
if (msg.message_id != MSG_USE_CONSTRUCTOR_CERTIFICATE) {
OnMessage(&msg);
} else {
// Skip MSG_USE_CONSTRUCTOR_CERTIFICATE because we don't want to trigger
// SetIdentity-related callbacks in the destructor. This can be a problem
// when WebRtcSession listens to the callback but it was the WebRtcSession
// destructor that caused TgWebRtcSessionDescriptionFactory's destruction.
// The callback is then ignored, leaking memory allocated by OnMessage for
// MSG_USE_CONSTRUCTOR_CERTIFICATE.
delete msg.pdata;
}
}
}
void TgWebRtcSessionDescriptionFactory::CreateOffer(
CreateSessionDescriptionObserver* observer,
const PeerConnectionInterface::RTCOfferAnswerOptions& options,
const cricket::MediaSessionOptions& session_options) {
std::string error = "CreateOffer";
if (certificate_request_state_ == CERTIFICATE_FAILED) {
error += kFailedDueToIdentityFailed;
RTC_LOG(LS_ERROR) << error;
PostCreateSessionDescriptionFailed(observer, error);
return;
}
if (!ValidMediaSessionOptions(session_options)) {
error += " called with invalid session options";
RTC_LOG(LS_ERROR) << error;
PostCreateSessionDescriptionFailed(observer, error);
return;
}
TgCreateSessionDescriptionRequest request(
TgCreateSessionDescriptionRequest::kOffer, observer, session_options);
if (certificate_request_state_ == CERTIFICATE_WAITING) {
create_session_description_requests_.push(request);
} else {
RTC_DCHECK(certificate_request_state_ == CERTIFICATE_SUCCEEDED ||
certificate_request_state_ == CERTIFICATE_NOT_NEEDED);
InternalCreateOffer(request);
}
}
void TgWebRtcSessionDescriptionFactory::CreateAnswer(
CreateSessionDescriptionObserver* observer,
const cricket::MediaSessionOptions& session_options) {
std::string error = "CreateAnswer";
if (certificate_request_state_ == CERTIFICATE_FAILED) {
error += kFailedDueToIdentityFailed;
RTC_LOG(LS_ERROR) << error;
PostCreateSessionDescriptionFailed(observer, error);
return;
}
if (!pc_->remote_description()) {
error += " can't be called before SetRemoteDescription.";
RTC_LOG(LS_ERROR) << error;
PostCreateSessionDescriptionFailed(observer, error);
return;
}
if (pc_->remote_description()->GetType() != SdpType::kOffer) {
error += " failed because remote_description is not an offer.";
RTC_LOG(LS_ERROR) << error;
PostCreateSessionDescriptionFailed(observer, error);
return;
}
if (!ValidMediaSessionOptions(session_options)) {
error += " called with invalid session options.";
RTC_LOG(LS_ERROR) << error;
PostCreateSessionDescriptionFailed(observer, error);
return;
}
TgCreateSessionDescriptionRequest request(
TgCreateSessionDescriptionRequest::kAnswer, observer, session_options);
if (certificate_request_state_ == CERTIFICATE_WAITING) {
create_session_description_requests_.push(request);
} else {
RTC_DCHECK(certificate_request_state_ == CERTIFICATE_SUCCEEDED ||
certificate_request_state_ == CERTIFICATE_NOT_NEEDED);
InternalCreateAnswer(request);
}
}
void TgWebRtcSessionDescriptionFactory::SetSdesPolicy(
cricket::SecurePolicy secure_policy) {
session_desc_factory_.set_secure(secure_policy);
}
cricket::SecurePolicy TgWebRtcSessionDescriptionFactory::SdesPolicy() const {
return session_desc_factory_.secure();
}
void TgWebRtcSessionDescriptionFactory::OnMessage(rtc::Message* msg) {
switch (msg->message_id) {
case MSG_CREATE_SESSIONDESCRIPTION_SUCCESS: {
CreateSessionDescriptionMsg* param =
static_cast<CreateSessionDescriptionMsg*>(msg->pdata);
param->observer->OnSuccess(param->description.release());
delete param;
break;
}
case MSG_CREATE_SESSIONDESCRIPTION_FAILED: {
CreateSessionDescriptionMsg* param =
static_cast<CreateSessionDescriptionMsg*>(msg->pdata);
param->observer->OnFailure(std::move(param->error));
delete param;
break;
}
case MSG_USE_CONSTRUCTOR_CERTIFICATE: {
rtc::ScopedRefMessageData<rtc::RTCCertificate>* param =
static_cast<rtc::ScopedRefMessageData<rtc::RTCCertificate>*>(
msg->pdata);
RTC_LOG(LS_INFO) << "Using certificate supplied to the constructor.";
SetCertificate(param->data());
delete param;
break;
}
default:
RTC_NOTREACHED();
break;
}
}
void TgWebRtcSessionDescriptionFactory::InternalCreateOffer(
TgCreateSessionDescriptionRequest request) {
if (pc_->local_description()) {
// If the needs-ice-restart flag is set as described by JSEP, we should
// generate an offer with a new ufrag/password to trigger an ICE restart.
for (cricket::MediaDescriptionOptions& options :
request.options.media_description_options) {
if (pc_->NeedsIceRestart(options.mid)) {
options.transport_options.ice_restart = true;
}
}
}
std::unique_ptr<cricket::SessionDescription> desc =
session_desc_factory_.CreateOffer(
request.options, pc_->local_description()
? pc_->local_description()->description()
: nullptr);
if (!desc) {
PostCreateSessionDescriptionFailed(request.observer,
"Failed to initialize the offer.");
return;
}
// RFC 3264
// When issuing an offer that modifies the session,
// the "o=" line of the new SDP MUST be identical to that in the
// previous SDP, except that the version in the origin field MUST
// increment by one from the previous SDP.
// Just increase the version number by one each time a new offer is created,
// regardless of whether it is identical to the previous one.
// |session_version_| is a uint64_t, so wrap-around should not happen.
RTC_DCHECK(session_version_ + 1 > session_version_);
auto offer = std::make_unique<JsepSessionDescription>(
SdpType::kOffer, std::move(desc), session_id_,
rtc::ToString(session_version_++));
if (pc_->local_description()) {
for (const cricket::MediaDescriptionOptions& options :
request.options.media_description_options) {
if (!options.transport_options.ice_restart) {
CopyCandidatesFromSessionDescription(pc_->local_description(),
options.mid, offer.get());
}
}
}
PostCreateSessionDescriptionSucceeded(request.observer, std::move(offer));
}
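// Worked example of the RFC 3264 versioning above (illustrative only): the
// first offer produced by this factory carries session version 2
// (|kInitSessionVersion|); a renegotiated offer from the same factory carries
// version 3 even when the rest of the SDP is unchanged, because
// |session_version_| is incremented on every InternalCreateOffer call.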
void TgWebRtcSessionDescriptionFactory::InternalCreateAnswer(
TgCreateSessionDescriptionRequest request) {
if (pc_->remote_description()) {
for (cricket::MediaDescriptionOptions& options :
request.options.media_description_options) {
// According to http://tools.ietf.org/html/rfc5245#section-9.2.1.1
// an answer should also contain new ICE ufrag and password if an offer
// has been received with new ufrag and password.
options.transport_options.ice_restart =
pc_->IceRestartPending(options.mid);
// We should pass the current SSL role to the transport description factory
// if there is already an ongoing session.
rtc::SSLRole ssl_role;
if (pc_->GetSslRole(options.mid, &ssl_role)) {
options.transport_options.prefer_passive_role =
(rtc::SSL_SERVER == ssl_role);
}
}
}
std::unique_ptr<cricket::SessionDescription> desc =
session_desc_factory_.CreateAnswer(
pc_->remote_description() ? pc_->remote_description()->description()
: nullptr,
request.options,
pc_->local_description() ? pc_->local_description()->description()
: nullptr);
if (!desc) {
PostCreateSessionDescriptionFailed(request.observer,
"Failed to initialize the answer.");
return;
}
// RFC 3264
// If the answer is different from the offer in any way (different IP
// addresses, ports, etc.), the origin line MUST be different in the answer.
// In that case, the version number in the "o=" line of the answer is
// unrelated to the version number in the o line of the offer.
// Get a new version number by increasing |session_version_|.
// |session_version_| is a uint64_t, so wrap-around should not happen.
RTC_DCHECK(session_version_ + 1 > session_version_);
auto answer = std::make_unique<JsepSessionDescription>(
SdpType::kAnswer, std::move(desc), session_id_,
rtc::ToString(session_version_++));
if (pc_->local_description()) {
// Include all local ICE candidates in the SessionDescription unless
// the remote peer has requested an ICE restart.
for (const cricket::MediaDescriptionOptions& options :
request.options.media_description_options) {
if (!options.transport_options.ice_restart) {
CopyCandidatesFromSessionDescription(pc_->local_description(),
options.mid, answer.get());
}
}
}
PostCreateSessionDescriptionSucceeded(request.observer, std::move(answer));
}
void TgWebRtcSessionDescriptionFactory::FailPendingRequests(
const std::string& reason) {
RTC_DCHECK(signaling_thread_->IsCurrent());
while (!create_session_description_requests_.empty()) {
const TgCreateSessionDescriptionRequest& request =
create_session_description_requests_.front();
PostCreateSessionDescriptionFailed(
request.observer,
((request.type == TgCreateSessionDescriptionRequest::kOffer)
? "CreateOffer"
: "CreateAnswer") +
reason);
create_session_description_requests_.pop();
}
}
void TgWebRtcSessionDescriptionFactory::PostCreateSessionDescriptionFailed(
CreateSessionDescriptionObserver* observer,
const std::string& error) {
CreateSessionDescriptionMsg* msg = new CreateSessionDescriptionMsg(
observer, RTCError(RTCErrorType::INTERNAL_ERROR, std::string(error)));
signaling_thread_->Post(RTC_FROM_HERE, this,
MSG_CREATE_SESSIONDESCRIPTION_FAILED, msg);
RTC_LOG(LS_ERROR) << "Create SDP failed: " << error;
}
void TgWebRtcSessionDescriptionFactory::PostCreateSessionDescriptionSucceeded(
CreateSessionDescriptionObserver* observer,
std::unique_ptr<SessionDescriptionInterface> description) {
CreateSessionDescriptionMsg* msg =
new CreateSessionDescriptionMsg(observer, RTCError::OK());
msg->description = std::move(description);
signaling_thread_->Post(RTC_FROM_HERE, this,
MSG_CREATE_SESSIONDESCRIPTION_SUCCESS, msg);
}
void TgWebRtcSessionDescriptionFactory::OnCertificateRequestFailed() {
RTC_DCHECK(signaling_thread_->IsCurrent());
RTC_LOG(LS_ERROR) << "Asynchronous certificate generation request failed.";
certificate_request_state_ = CERTIFICATE_FAILED;
FailPendingRequests(kFailedDueToIdentityFailed);
}
void TgWebRtcSessionDescriptionFactory::SetCertificate(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) {
RTC_DCHECK(certificate);
RTC_LOG(LS_VERBOSE) << "Setting new certificate.";
certificate_request_state_ = CERTIFICATE_SUCCEEDED;
SignalCertificateReady(certificate);
transport_desc_factory_.set_certificate(certificate);
transport_desc_factory_.set_secure(cricket::SEC_ENABLED);
while (!create_session_description_requests_.empty()) {
if (create_session_description_requests_.front().type ==
TgCreateSessionDescriptionRequest::kOffer) {
InternalCreateOffer(create_session_description_requests_.front());
} else {
InternalCreateAnswer(create_session_description_requests_.front());
}
create_session_description_requests_.pop();
}
}
} // namespace webrtc

View File

@@ -0,0 +1,167 @@
/*
* Copyright 2013 The WebRTC project authors. All Rights Reserved.
*
* Use of this source code is governed by a BSD-style license
* that can be found in the LICENSE file in the root of the source
* tree. An additional intellectual property rights grant can be found
* in the file PATENTS. All contributing project authors may
* be found in the AUTHORS file in the root of the source tree.
*/
#ifndef TG_PC_WEBRTC_SESSION_DESCRIPTION_FACTORY_H_
#define TG_PC_WEBRTC_SESSION_DESCRIPTION_FACTORY_H_
#include <stdint.h>
#include <memory>
#include <queue>
#include <string>
#include "api/jsep.h"
#include "api/peer_connection_interface.h"
#include "api/scoped_refptr.h"
#include "p2p/base/transport_description.h"
#include "p2p/base/transport_description_factory.h"
#include "pc/media_session.h"
#include "pc/peer_connection_internal.h"
#include "rtc_base/constructor_magic.h"
#include "rtc_base/message_handler.h"
#include "rtc_base/message_queue.h"
#include "rtc_base/rtc_certificate.h"
#include "rtc_base/rtc_certificate_generator.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "rtc_base/thread.h"
#include "rtc_base/unique_id_generator.h"
namespace webrtc {
class TgPeerConnection;
// DTLS certificate request callback class.
class TgWebRtcCertificateGeneratorCallback
: public rtc::RTCCertificateGeneratorCallback,
public sigslot::has_slots<> {
public:
// |rtc::RTCCertificateGeneratorCallback| overrides.
void OnSuccess(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate) override;
void OnFailure() override;
sigslot::signal0<> SignalRequestFailed;
sigslot::signal1<const rtc::scoped_refptr<rtc::RTCCertificate>&>
SignalCertificateReady;
};
struct TgCreateSessionDescriptionRequest {
enum Type {
kOffer,
kAnswer,
};
TgCreateSessionDescriptionRequest(Type type,
CreateSessionDescriptionObserver* observer,
const cricket::MediaSessionOptions& options)
: type(type), observer(observer), options(options) {}
Type type;
rtc::scoped_refptr<CreateSessionDescriptionObserver> observer;
cricket::MediaSessionOptions options;
};
// This class is used to create offer/answer session descriptions. Certificates
// for WebRtcSession/DTLS are either supplied at construction or generated
// asynchronously. Create offer/answer requests are queued until certificate
// generation has completed, i.e. until OnCertificateRequestFailed or
// SetCertificate is called.
class TgWebRtcSessionDescriptionFactory : public rtc::MessageHandler,
public sigslot::has_slots<> {
public:
// Can specify either a |cert_generator| or |certificate| to enable DTLS. If
// a certificate generator is given, starts generating the certificate
// asynchronously. If a certificate is given, will use that for identifying
// over DTLS. If neither is specified, DTLS is disabled.
TgWebRtcSessionDescriptionFactory(
rtc::Thread* signaling_thread,
cricket::ChannelManager* channel_manager,
TgPeerConnection* pc,
const std::string& session_id,
std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator,
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate,
rtc::UniqueRandomIdGenerator* ssrc_generator);
virtual ~TgWebRtcSessionDescriptionFactory();
static void CopyCandidatesFromSessionDescription(
const SessionDescriptionInterface* source_desc,
const std::string& content_name,
SessionDescriptionInterface* dest_desc);
void CreateOffer(
CreateSessionDescriptionObserver* observer,
const PeerConnectionInterface::RTCOfferAnswerOptions& options,
const cricket::MediaSessionOptions& session_options);
void CreateAnswer(CreateSessionDescriptionObserver* observer,
const cricket::MediaSessionOptions& session_options);
void SetSdesPolicy(cricket::SecurePolicy secure_policy);
cricket::SecurePolicy SdesPolicy() const;
void set_enable_encrypted_rtp_header_extensions(bool enable) {
session_desc_factory_.set_enable_encrypted_rtp_header_extensions(enable);
}
void set_is_unified_plan(bool is_unified_plan) {
session_desc_factory_.set_is_unified_plan(is_unified_plan);
}
sigslot::signal1<const rtc::scoped_refptr<rtc::RTCCertificate>&>
SignalCertificateReady;
// For testing.
bool waiting_for_certificate_for_testing() const {
return certificate_request_state_ == CERTIFICATE_WAITING;
}
private:
enum CertificateRequestState {
CERTIFICATE_NOT_NEEDED,
CERTIFICATE_WAITING,
CERTIFICATE_SUCCEEDED,
CERTIFICATE_FAILED,
};
// MessageHandler implementation.
virtual void OnMessage(rtc::Message* msg);
void InternalCreateOffer(TgCreateSessionDescriptionRequest request);
void InternalCreateAnswer(TgCreateSessionDescriptionRequest request);
// Posts failure notifications for all pending session description requests.
void FailPendingRequests(const std::string& reason);
void PostCreateSessionDescriptionFailed(
CreateSessionDescriptionObserver* observer,
const std::string& error);
void PostCreateSessionDescriptionSucceeded(
CreateSessionDescriptionObserver* observer,
std::unique_ptr<SessionDescriptionInterface> description);
void OnCertificateRequestFailed();
void SetCertificate(
const rtc::scoped_refptr<rtc::RTCCertificate>& certificate);
std::queue<TgCreateSessionDescriptionRequest>
create_session_description_requests_;
rtc::Thread* const signaling_thread_;
cricket::TransportDescriptionFactory transport_desc_factory_;
cricket::MediaSessionDescriptionFactory session_desc_factory_;
uint64_t session_version_;
const std::unique_ptr<rtc::RTCCertificateGeneratorInterface> cert_generator_;
// TODO(jiayl): remove the dependency on peer connection once bug 2264 is
// fixed.
TgPeerConnection* const pc_;
const std::string session_id_;
CertificateRequestState certificate_request_state_;
RTC_DISALLOW_COPY_AND_ASSIGN(TgWebRtcSessionDescriptionFactory);
};
} // namespace webrtc
#endif // TG_PC_WEBRTC_SESSION_DESCRIPTION_FACTORY_H_

View File

@@ -23,7 +23,7 @@ if [ "$ARCH" == "x64" ]; then
OUT_DIR="ios_sim"
fi
gn gen out/$OUT_DIR --args="use_xcode_clang=true "" target_cpu=\"$ARCH\""' target_os="ios" is_debug=true is_component_build=false rtc_include_tests=false use_rtti=true rtc_use_x11=false use_custom_libcxx=false use_custom_libcxx_for_host=false rtc_include_builtin_video_codecs=false rtc_build_ssl=false rtc_build_examples=false rtc_build_tools=false ios_deployment_target="9.0" ios_enable_code_signing=false is_unsafe_developer_build=false rtc_enable_protobuf=false rtc_include_builtin_video_codecs=false rtc_use_gtk=false rtc_use_metal_rendering=true rtc_ssl_root="//openssl"'
gn gen out/$OUT_DIR --args="use_xcode_clang=true "" target_cpu=\"$ARCH\""' target_os="ios" is_debug=true is_component_build=false rtc_include_tests=false use_rtti=true rtc_use_x11=false use_custom_libcxx=false use_custom_libcxx_for_host=false rtc_build_ssl=false rtc_build_examples=false rtc_build_tools=false ios_deployment_target="9.0" ios_enable_code_signing=false is_unsafe_developer_build=false rtc_enable_protobuf=false rtc_include_builtin_video_codecs=true rtc_build_libvpx=true rtc_libvpx_build_vp9=true rtc_use_gtk=false rtc_use_metal_rendering=true rtc_ssl_root="//openssl"'
ninja -C out/$OUT_DIR framework_objc_static
popd