Mirror of https://github.com/Swiftgram/Telegram-iOS.git, synced 2025-07-20 18:21:10 +00:00

Commit fe6188db45: Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios

.gitmodules (vendored), 3 changes:
@@ -23,3 +23,6 @@ url=https://github.com/bazelbuild/rules_apple.git
 [submodule "third-party/webrtc/webrtc-ios"]
 	path = third-party/webrtc/webrtc-ios
 	url=https://github.com/ali-fareed/webrtc-ios.git
+[submodule "submodules/TgVoipWebrtc/tgcalls"]
+	path = submodules/TgVoipWebrtc/tgcalls
+	url=../tgcalls.git
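Note that the added url=../tgcalls.git is a relative submodule URL: git typically resolves it against the superproject's own remote, so the submodule is fetched from a sibling repository named tgcalls.git next to wherever this Telegram-iOS clone was taken from. The exact remote therefore depends on the origin of the superproject clone.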
@@ -1007,6 +1007,9 @@ class ChatListItemNode: ItemListRevealOptionsItemNode {
             let contentImageFillSize = CGSize(width: 8.0, height: contentImageSize.height)
             _ = contentImageFillSize
             for message in messages {
+                if contentImageSpecs.count >= 3 {
+                    break
+                }
                 inner: for media in message.media {
                     if let image = media as? TelegramMediaImage {
                         if let _ = largestImageRepresentation(image.representations) {
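For context, the added guard simply stops scanning messages once three media previews have been collected. A minimal standalone Swift sketch of the same early-exit pattern (data and names hypothetical):

    var contentImageSpecs: [String] = []
    let messages = ["m1", "m2", "m3", "m4", "m5"]
    for message in messages {
        if contentImageSpecs.count >= 3 {
            break // cap the preview row at three items
        }
        contentImageSpecs.append(message)
    }
    // contentImageSpecs == ["m1", "m2", "m3"]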
@@ -210,7 +210,11 @@ final class CallControllerButtonsNode: ASDisplayNode {
                 }
                 topButtons.append(.enableCamera(isCameraActive))
                 topButtons.append(.mute(self.isMuted))
-                topButtons.append(.switchCamera)
+                if case .possible = videoState {
+                    topButtons.append(.soundOutput(soundOutput))
+                } else {
+                    topButtons.append(.switchCamera)
+                }
             case .notAvailable:
                 topButtons.append(.mute(self.isMuted))
                 topButtons.append(.soundOutput(soundOutput))
@@ -302,7 +306,7 @@ final class CallControllerButtonsNode: ASDisplayNode {
 
             topButtons.append(.enableCamera(false))
             topButtons.append(.mute(self.isMuted))
-            topButtons.append(.switchCamera)
+            topButtons.append(.soundOutput(soundOutput))
 
             let topButtonsContentWidth = CGFloat(topButtons.count) * smallButtonSize
             let topButtonsAvailableSpacingWidth = width - topButtonsContentWidth - minSmallButtonSideInset * 2.0
|
@ -87,6 +87,12 @@ private final class CallVideoNode: ASDisplayNode {
|
|||||||
|
|
||||||
self.videoView.view.frame = videoFrame
|
self.videoView.view.frame = videoFrame
|
||||||
|
|
||||||
|
if let effectView = self.effectView {
|
||||||
|
effectView.frame = videoFrame
|
||||||
|
transition.animatePositionAdditive(layer: effectView.layer, offset: CGPoint(x: previousVideoFrame.midX - videoFrame.midX, y: previousVideoFrame.midY - videoFrame.midY))
|
||||||
|
transition.animateTransformScale(view: effectView, from: previousVideoFrame.height / videoFrame.height)
|
||||||
|
}
|
||||||
|
|
||||||
transition.updateCornerRadius(layer: self.videoTransformContainer.layer, cornerRadius: self.currentCornerRadius)
|
transition.updateCornerRadius(layer: self.videoTransformContainer.layer, cornerRadius: self.currentCornerRadius)
|
||||||
if let effectView = self.effectView {
|
if let effectView = self.effectView {
|
||||||
transition.updateCornerRadius(layer: effectView.layer, cornerRadius: self.currentCornerRadius)
|
transition.updateCornerRadius(layer: effectView.layer, cornerRadius: self.currentCornerRadius)
|
||||||
|
@ -171,8 +171,9 @@ final class CallControllerStatusNode: ASDisplayNode {
|
|||||||
self.statusNode.frame = CGRect(origin: CGPoint(x: floor((constrainedWidth - statusMeasureLayout.size.width) / 2.0) + statusOffset, y: titleLayout.size.height + spacing), size: statusLayout.size)
|
self.statusNode.frame = CGRect(origin: CGPoint(x: floor((constrainedWidth - statusMeasureLayout.size.width) / 2.0) + statusOffset, y: titleLayout.size.height + spacing), size: statusLayout.size)
|
||||||
self.receptionNode.frame = CGRect(origin: CGPoint(x: self.statusNode.frame.minX - receptionNodeSize.width, y: titleLayout.size.height + spacing + 9.0), size: receptionNodeSize)
|
self.receptionNode.frame = CGRect(origin: CGPoint(x: self.statusNode.frame.minX - receptionNodeSize.width, y: titleLayout.size.height + spacing + 9.0), size: receptionNodeSize)
|
||||||
self.logoNode.isHidden = !statusDisplayLogo
|
self.logoNode.isHidden = !statusDisplayLogo
|
||||||
if let image = self.logoNode.image {
|
if let image = self.logoNode.image, let firstLineRect = statusMeasureLayout.linesRects().first {
|
||||||
self.logoNode.frame = CGRect(origin: CGPoint(x: self.statusNode.frame.minX - image.size.width - 7.0, y: self.statusNode.frame.minY + 5.0), size: image.size)
|
let firstLineOffset = floor((statusMeasureLayout.size.width - firstLineRect.width) / 2.0)
|
||||||
|
self.logoNode.frame = CGRect(origin: CGPoint(x: self.statusNode.frame.minX + firstLineOffset - image.size.width - 7.0, y: self.statusNode.frame.minY + 5.0), size: image.size)
|
||||||
}
|
}
|
||||||
|
|
||||||
return titleLayout.size.height + spacing + statusLayout.size.height
|
return titleLayout.size.height + spacing + statusLayout.size.height
|
||||||
|
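The firstLineOffset arithmetic matters when the status text wraps: the text block is centered as a whole, so a first line narrower than the block starts firstLineOffset points inside the block's left edge, and the logo should hug that line rather than the block. A self-contained Swift sketch of the computation (values hypothetical):

    import CoreGraphics

    let blockWidth: CGFloat = 200.0      // statusMeasureLayout.size.width
    let firstLineWidth: CGFloat = 140.0  // width of the first rendered line
    let firstLineOffset = floor((blockWidth - firstLineWidth) / 2.0)  // 30.0
    // Before this change the logo sat 7 points left of the block's edge;
    // now it sits 7 points left of the first line's actual left edge.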
@@ -809,10 +809,11 @@ public final class PresentationCallImpl: PresentationCall {
     public func makeIncomingVideoView(completion: @escaping (PresentationCallVideoView?) -> Void) {
         self.ongoingContext?.makeIncomingVideoView(completion: { view in
             if let view = view {
+                let setOnFirstFrameReceived = view.setOnFirstFrameReceived
                 completion(PresentationCallVideoView(
-                    view: view,
-                    setOnFirstFrameReceived: { [weak view] f in
-                        view?.setOnFirstFrameReceived(f)
+                    view: view.view,
+                    setOnFirstFrameReceived: { f in
+                        setOnFirstFrameReceived(f)
                     }
                 ))
             } else {
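This closure change goes hand in hand with the protocol-to-class rewrite of OngoingCallContextPresentationCallVideoView later in this commit: view is no longer a UIView kept alive by the view hierarchy but a plain wrapper object, so a [weak view] capture could see it deallocate immediately. Capturing the setOnFirstFrameReceived function value keeps only the underlying callback alive. A reduced Swift sketch of the pattern (types hypothetical):

    final class VideoViewWrapper {
        let setOnFirstFrameReceived: ((() -> Void)?) -> Void
        init(setOnFirstFrameReceived: @escaping ((() -> Void)?) -> Void) {
            self.setOnFirstFrameReceived = setOnFirstFrameReceived
        }
    }

    func makeForwarder(wrapper: VideoViewWrapper) -> ((() -> Void)?) -> Void {
        // Capture the closure, not the wrapper: the wrapper may be
        // short-lived, while the callback target persists.
        let setOnFirstFrameReceived = wrapper.setOnFirstFrameReceived
        return { f in setOnFirstFrameReceived(f) }
    }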
@@ -829,10 +830,11 @@ public final class PresentationCallImpl: PresentationCall {
 
         self.videoCapturer?.makeOutgoingVideoView(completion: { view in
             if let view = view {
+                let setOnFirstFrameReceived = view.setOnFirstFrameReceived
                 completion(PresentationCallVideoView(
-                    view: view,
-                    setOnFirstFrameReceived: { [weak view] f in
-                        view?.setOnFirstFrameReceived(f)
+                    view: view.view,
+                    setOnFirstFrameReceived: { f in
+                        setOnFirstFrameReceived(f)
                     }
                 ))
             } else {
@@ -1111,13 +1111,13 @@ public final class AccountViewTracker {
         let combinedDisposable = MetaDisposable()
         self.queue.async {
             var addHole = false
-            if let context = self.channelPollingContexts[peerId] {
+            /*if let context = self.channelPollingContexts[peerId] {
                 if context.subscribers.isEmpty {
                     addHole = true
                 }
             } else {
                 addHole = true
-            }
+            }*/
             if addHole {
                 let _ = self.account?.postbox.transaction({ transaction -> Void in
                     if transaction.getPeerChatListIndex(peerId) == nil {
@@ -218,7 +218,7 @@ private func parseConnectionSet(primary: Api.PhoneConnection, alternative: [Api.
 private final class CallSessionContext {
     let peerId: PeerId
     let isOutgoing: Bool
-    let type: CallSession.CallType
+    var type: CallSession.CallType
     var state: CallSessionInternalState
     let subscribers = Bag<(CallSession) -> Void>()
     let signalingSubscribers = Bag<(Data) -> Void>()
@@ -576,6 +576,12 @@ private final class CallSessionManagerContext {
         }
     }
 
+    func updateCallType(internalId: CallSessionInternalId, type: CallSession.CallType) {
+        if let context = self.contexts[internalId] {
+            context.type = type
+        }
+    }
+
     func updateSession(_ call: Api.PhoneCall, completion: @escaping ((CallSessionRingingState, CallSession)?) -> Void) {
         var resultRingingState: (CallSessionRingingState, CallSession)?
 
@@ -932,6 +938,12 @@ public final class CallSessionManager {
         }
     }
 
+    public func updateCallType(internalId: CallSessionInternalId, type: CallSession.CallType) {
+        self.withContext { context in
+            context.updateCallType(internalId: internalId, type: type)
+        }
+    }
+
     public func updateVersions(versions: [String]) {
         self.withContext { context in
             context.updateVersions(versions: versions)
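Together, the two updateCallType additions plumb a call-type change from the public CallSessionManager API down to the per-session context, which is why the type field becomes var in the earlier hunk. A condensed Swift sketch of the same lookup-and-mutate pattern (types simplified, names hypothetical):

    import Foundation

    enum CallType { case audio, video }

    final class SessionContext {
        var type: CallType = .audio
    }

    final class SessionManager {
        private var contexts: [UUID: SessionContext] = [:]

        func updateCallType(internalId: UUID, type: CallType) {
            // Mutate the live context in place; unknown ids are
            // silently ignored, matching the diff above.
            contexts[internalId]?.type = type
        }
    }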
@@ -308,7 +308,18 @@ public final class OngoingCallVideoCapturer {
     }
 
     public func makeOutgoingVideoView(completion: @escaping (OngoingCallContextPresentationCallVideoView?) -> Void) {
-        self.impl.makeOutgoingVideoView(completion)
+        self.impl.makeOutgoingVideoView { view in
+            if let view = view {
+                completion(OngoingCallContextPresentationCallVideoView(
+                    view: view,
+                    setOnFirstFrameReceived: { [weak view] f in
+                        view?.setOnFirstFrameReceived(f)
+                    }
+                ))
+            } else {
+                completion(nil)
+            }
+        }
     }
 
     public func setIsVideoEnabled(_ value: Bool) {
@@ -384,8 +395,17 @@ private extension OngoingCallContextState.State {
     }
 }
 
-public protocol OngoingCallContextPresentationCallVideoView: UIView {
-    func setOnFirstFrameReceived(_ onFirstFrameReceived: (() -> Void)?)
+public final class OngoingCallContextPresentationCallVideoView {
+    public let view: UIView
+    public let setOnFirstFrameReceived: ((() -> Void)?) -> Void
+
+    public init(
+        view: UIView,
+        setOnFirstFrameReceived: @escaping ((() -> Void)?) -> Void
+    ) {
+        self.view = view
+        self.setOnFirstFrameReceived = setOnFirstFrameReceived
+    }
 }
 
 public final class OngoingCallContext {
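Replacing the protocol (whose conformers had to be UIView subclasses) with a final class holding a UIView plus a closure decouples the public surface from the concrete WebRTC view type; any view can now be wrapped without declaring conformance, which is also why the Objective-C conformance extension is deleted further down. A usage sketch under that definition (view value hypothetical):

    import UIKit

    let someVideoView = UIView()
    let wrapped = OngoingCallContextPresentationCallVideoView(
        view: someVideoView,
        setOnFirstFrameReceived: { callback in
            // Forward to the real first-frame hook here.
            callback?()
        }
    )
    wrapped.setOnFirstFrameReceived { print("first frame") }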
@@ -424,6 +444,8 @@ public final class OngoingCallContext {
         return self.contextState.get()
     }
 
+    private var didReportCallAsVideo: Bool = false
+
     private var signalingDataDisposable: Disposable?
 
     private let receptionPromise = Promise<Int32?>(nil)
@@ -503,7 +525,7 @@ public final class OngoingCallContext {
             }, videoCapturer: video?.impl)
 
             strongSelf.contextRef = Unmanaged.passRetained(OngoingCallThreadLocalContextHolder(context))
-            context.stateChanged = { state, videoState, remoteVideoState in
+            context.stateChanged = { [weak callSessionManager] state, videoState, remoteVideoState in
                 queue.async {
                     guard let strongSelf = self else {
                         return
@@ -531,6 +553,10 @@ public final class OngoingCallContext {
                     @unknown default:
                         mappedRemoteVideoState = .inactive
                     }
+                    if case .active = mappedVideoState, !strongSelf.didReportCallAsVideo {
+                        strongSelf.didReportCallAsVideo = true
+                        callSessionManager?.updateCallType(internalId: internalId, type: .video)
+                    }
                     strongSelf.contextState.set(.single(OngoingCallContextState(state: mappedState, videoState: mappedVideoState, remoteVideoState: mappedRemoteVideoState)))
                 }
             }
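The didReportCallAsVideo flag makes the upgrade a one-shot: the first time the mapped video state becomes .active, the session's call type is reported as video exactly once. A minimal Swift sketch of the same latch pattern (types hypothetical):

    final class VideoCallReporter {
        private var didReport = false

        func observe(isVideoActive: Bool, report: () -> Void) {
            // Latch: fire only on the first active observation.
            if isVideoActive && !didReport {
                didReport = true
                report()
            }
        }
    }

    let reporter = VideoCallReporter()
    reporter.observe(isVideoActive: true) { print("upgraded to video") } // fires
    reporter.observe(isVideoActive: true) { print("upgraded to video") } // no-op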
@@ -677,13 +703,21 @@ public final class OngoingCallContext {
     public func makeIncomingVideoView(completion: @escaping (OngoingCallContextPresentationCallVideoView?) -> Void) {
         self.withContext { context in
             if let context = context as? OngoingCallThreadLocalContextWebrtc {
-                context.makeIncomingVideoView(completion)
+                context.makeIncomingVideoView { view in
+                    if let view = view {
+                        completion(OngoingCallContextPresentationCallVideoView(
+                            view: view,
+                            setOnFirstFrameReceived: { [weak view] f in
+                                view?.setOnFirstFrameReceived(f)
+                            }
+                        ))
+                    } else {
+                        completion(nil)
+                    }
+                }
             } else {
                 completion(nil)
             }
         }
     }
 }
-
-extension OngoingCallThreadLocalContextWebrtcVideoView: OngoingCallContextPresentationCallVideoView {
-}
|
@ -7,17 +7,21 @@ static_library(
|
|||||||
srcs = glob([
|
srcs = glob([
|
||||||
"Sources/**/*.m",
|
"Sources/**/*.m",
|
||||||
"Sources/**/*.mm",
|
"Sources/**/*.mm",
|
||||||
"Impl/**/*.cpp",
|
"tgcalls/tgcalls/**/*.cpp",
|
||||||
"Impl/**/*.mm",
|
"tgcalls/tgcalls/**/*.mm",
|
||||||
"Impl/**/*.m",
|
"tgcalls/tgcalls/**/*.m",
|
||||||
|
], exclude = [
|
||||||
|
"tgcalls/tgcalls/legacy/**",
|
||||||
|
"tgcalls/tgcalls/platform/tdesktop/**",
|
||||||
|
"tgcalls/tgcalls/platform/windows/**",
|
||||||
]),
|
]),
|
||||||
has_cpp = True,
|
has_cpp = True,
|
||||||
headers = merge_maps([
|
headers = merge_maps([
|
||||||
glob_sub_map("PublicHeaders/", [
|
glob_sub_map("PublicHeaders/", [
|
||||||
"PublicHeaders/**/*.h",
|
"PublicHeaders/**/*.h",
|
||||||
]),
|
]),
|
||||||
glob_sub_map("Impl/", [
|
glob_sub_map("tgcalls/tgcalls/", [
|
||||||
"Impl/**/*.h",
|
"tgcalls/tgcalls/**/*.h",
|
||||||
]),
|
]),
|
||||||
]),
|
]),
|
||||||
exported_headers = glob([
|
exported_headers = glob([
|
||||||
@@ -29,6 +33,7 @@ static_library(
         webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc",
         webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/base",
         webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/opengl",
         webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/components/video_codec",
         webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/third_party/libyuv/include",
         webrtc_include_prefix + "third-party/submodules/TgVoipWebrtc/PublicHeaders",
@@ -36,7 +41,6 @@ static_library(
         "-DWEBRTC_MAC",
         "-DWEBRTC_POSIX",
         "-DRTC_ENABLE_VP9",
-        "-DTGVOIP_NAMESPACE=tgvoip_webrtc",
     ],
     deps = [
         "//submodules/MtProtoKit:MtProtoKit#shared",
|
@ -1,9 +1,4 @@
|
|||||||
|
|
||||||
cc_library(
|
|
||||||
name = "webrtc_lib",
|
|
||||||
srcs = ["libwebrtc.a"],
|
|
||||||
)
|
|
||||||
|
|
||||||
objc_library(
|
objc_library(
|
||||||
name = "TgVoipWebrtc",
|
name = "TgVoipWebrtc",
|
||||||
enable_modules = True,
|
enable_modules = True,
|
||||||
@@ -12,21 +7,26 @@ objc_library(
         "Sources/**/*.m",
         "Sources/**/*.mm",
         "Sources/**/*.h",
-        "Impl/**/*.h",
-        "Impl/**/*.cpp",
-        "Impl/**/*.mm",
-        "Impl/**/*.m",
+        "tgcalls/tgcalls/**/*.h",
+        "tgcalls/tgcalls/**/*.cpp",
+        "tgcalls/tgcalls/**/*.mm",
+        "tgcalls/tgcalls/**/*.m",
+    ], exclude = [
+        "tgcalls/tgcalls/legacy/**",
+        "tgcalls/tgcalls/platform/tdesktop/**",
+        "tgcalls/tgcalls/platform/windows/**",
     ]),
     hdrs = glob([
         "PublicHeaders/**/*.h",
     ]),
     copts = [
-        "-I{}/Impl".format(package_name()),
+        "-I{}/tgcalls/tgcalls".format(package_name()),
         "-Ithird-party/webrtc/webrtc-ios/src",
         "-Ithird-party/webrtc/webrtc-ios/src/third_party/abseil-cpp",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/base",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
+        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/opengl",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/video_codec",
         "-Ithird-party/webrtc/webrtc-ios/src/third_party/libyuv/include",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/api/video_codec",
@@ -43,7 +43,6 @@ objc_library(
     deps = [
         "//third-party/webrtc:webrtc_lib",
         "//submodules/MtProtoKit:MtProtoKit",
-        "//submodules/Opus:opus",
        "//submodules/openssl:openssl",
     ],
     sdk_frameworks = [
@@ -1,25 +0,0 @@
-/*
- * Copyright 2017 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#import <Foundation/Foundation.h>
-
-#import "RTCMacros.h"
-#import "RTCVideoDecoderFactory.h"
-
-NS_ASSUME_NONNULL_BEGIN
-
-/** This decoder factory include support for all codecs bundled with WebRTC. If using custom
- * codecs, create custom implementations of RTCVideoEncoderFactory and RTCVideoDecoderFactory.
- */
-RTC_OBJC_EXPORT
-@interface TGRTCDefaultVideoDecoderFactory : NSObject <RTCVideoDecoderFactory>
-@end
-
-NS_ASSUME_NONNULL_END
@@ -1,90 +0,0 @@
-/*
- * Copyright 2017 The WebRTC Project Authors. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#import "TGRTCDefaultVideoDecoderFactory.h"
-
-#import "RTCH264ProfileLevelId.h"
-#import "RTCVideoDecoderH264.h"
-#import "api/video_codec/RTCVideoCodecConstants.h"
-#import "api/video_codec/RTCVideoDecoderVP8.h"
-#import "base/RTCVideoCodecInfo.h"
-#if defined(RTC_ENABLE_VP9)
-#import "api/video_codec/RTCVideoDecoderVP9.h"
-#endif
-#if !defined(DISABLE_H265)
-#import "RTCH265ProfileLevelId.h"
-#import "TGRTCVideoDecoderH265.h"
-#endif
-
-@implementation TGRTCDefaultVideoDecoderFactory
-
-- (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
-  NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
-    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
-    @"level-asymmetry-allowed" : @"1",
-    @"packetization-mode" : @"1",
-  };
-  RTCVideoCodecInfo *constrainedHighInfo =
-      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
-                                   parameters:constrainedHighParams];
-
-  NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
-    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
-    @"level-asymmetry-allowed" : @"1",
-    @"packetization-mode" : @"1",
-  };
-  RTCVideoCodecInfo *constrainedBaselineInfo =
-      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
-                                   parameters:constrainedBaselineParams];
-
-  RTCVideoCodecInfo *vp8Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];
-
-#if defined(RTC_ENABLE_VP9)
-  RTCVideoCodecInfo *vp9Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp9Name];
-#endif
-
-#if !defined(DISABLE_H265)
-  RTCVideoCodecInfo *h265Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH265Name];
-#endif
-
-  return @[
-    constrainedHighInfo,
-    constrainedBaselineInfo,
-    vp8Info,
-#if defined(RTC_ENABLE_VP9)
-    vp9Info,
-#endif
-#if !defined(DISABLE_H265)
-    h265Info,
-#endif
-  ];
-}
-
-- (id<RTCVideoDecoder>)createDecoder:(RTCVideoCodecInfo *)info {
-  if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
-    return [[RTCVideoDecoderH264 alloc] init];
-  } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
-    return [RTCVideoDecoderVP8 vp8Decoder];
-#if defined(RTC_ENABLE_VP9)
-  } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) {
-    return [RTCVideoDecoderVP9 vp9Decoder];
-#endif
-#if !defined(DISABLE_H265)
-  } else if (@available(iOS 11, *)) {
-    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
-      return [[TGRTCVideoDecoderH265 alloc] init];
-    }
-#endif
-  }
-
-  return nil;
-}
-
-@end
@@ -1,30 +0,0 @@
-/*
- * Copyright 2017 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#import <Foundation/Foundation.h>
-
-#import "RTCMacros.h"
-#import "RTCVideoEncoderFactory.h"
-
-NS_ASSUME_NONNULL_BEGIN
-
-/** This encoder factory include support for all codecs bundled with WebRTC. If using custom
- * codecs, create custom implementations of RTCVideoEncoderFactory and RTCVideoDecoderFactory.
- */
-RTC_OBJC_EXPORT
-@interface TGRTCDefaultVideoEncoderFactory : NSObject <RTCVideoEncoderFactory>
-
-@property(nonatomic, retain) RTCVideoCodecInfo *preferredCodec;
-
-+ (NSArray<RTCVideoCodecInfo *> *)supportedCodecs;
-
-@end
-
-NS_ASSUME_NONNULL_END
@@ -1,106 +0,0 @@
-/*
- * Copyright 2017 The WebRTC Project Authors. All rights reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#import "TGRTCDefaultVideoEncoderFactory.h"
-
-#import "RTCH264ProfileLevelId.h"
-#import "RTCVideoEncoderH264.h"
-#import "api/video_codec/RTCVideoCodecConstants.h"
-#import "api/video_codec/RTCVideoEncoderVP8.h"
-#import "base/RTCVideoCodecInfo.h"
-#if defined(RTC_ENABLE_VP9)
-#import "api/video_codec/RTCVideoEncoderVP9.h"
-#endif
-#if !defined(DISABLE_H265)
-#import "RTCH265ProfileLevelId.h"
-#import "TGRTCVideoEncoderH265.h"
-#endif
-
-@implementation TGRTCDefaultVideoEncoderFactory
-
-@synthesize preferredCodec;
-
-+ (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
-  NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
-    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
-    @"level-asymmetry-allowed" : @"1",
-    @"packetization-mode" : @"1",
-  };
-  RTCVideoCodecInfo *constrainedHighInfo =
-      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
-                                   parameters:constrainedHighParams];
-
-  NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
-    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
-    @"level-asymmetry-allowed" : @"1",
-    @"packetization-mode" : @"1",
-  };
-  RTCVideoCodecInfo *constrainedBaselineInfo =
-      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
-                                   parameters:constrainedBaselineParams];
-
-  RTCVideoCodecInfo *vp8Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];
-
-#if defined(RTC_ENABLE_VP9)
-  RTCVideoCodecInfo *vp9Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp9Name];
-#endif
-
-#if !defined(DISABLE_H265)
-  RTCVideoCodecInfo *h265Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH265Name];
-#endif
-
-  return @[
-    constrainedHighInfo,
-    constrainedBaselineInfo,
-    vp8Info,
-#if defined(RTC_ENABLE_VP9)
-    vp9Info,
-#endif
-#if !defined(DISABLE_H265)
-    h265Info,
-#endif
-  ];
-}
-
-- (id<RTCVideoEncoder>)createEncoder:(RTCVideoCodecInfo *)info {
-  if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
-    return [[RTCVideoEncoderH264 alloc] initWithCodecInfo:info];
-  } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
-    return [RTCVideoEncoderVP8 vp8Encoder];
-#if defined(RTC_ENABLE_VP9)
-  } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) {
-    return [RTCVideoEncoderVP9 vp9Encoder];
-#endif
-#if !defined(DISABLE_H265)
-  } else if (@available(iOS 11, *)) {
-    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
-      return [[TGRTCVideoEncoderH265 alloc] initWithCodecInfo:info];
-    }
-#endif
-  }
-
-  return nil;
-}
-
-- (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
-  NSMutableArray<RTCVideoCodecInfo *> *codecs = [[[self class] supportedCodecs] mutableCopy];
-
-  NSMutableArray<RTCVideoCodecInfo *> *orderedCodecs = [NSMutableArray array];
-  NSUInteger index = [codecs indexOfObject:self.preferredCodec];
-  if (index != NSNotFound) {
-    [orderedCodecs addObject:[codecs objectAtIndex:index]];
-    [codecs removeObjectAtIndex:index];
-  }
-  [orderedCodecs addObjectsFromArray:codecs];
-
-  return [orderedCodecs copy];
-}
-
-@end
@@ -1,19 +0,0 @@
-/*
- * Copyright 2017 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#import <Foundation/Foundation.h>
-
-#import "RTCMacros.h"
-#import "RTCVideoDecoder.h"
-
-RTC_OBJC_EXPORT
-API_AVAILABLE(ios(11.0))
-@interface TGRTCVideoDecoderH265 : NSObject <RTCVideoDecoder>
-@end
@@ -1,326 +0,0 @@
-/*
- * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- *
- */
-
-#import "TGRTCVideoDecoderH265.h"
-
-#import <VideoToolbox/VideoToolbox.h>
-
-#import "base/RTCVideoFrame.h"
-#import "base/RTCVideoFrameBuffer.h"
-#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
-#import "helpers.h"
-#import "helpers/scoped_cftyperef.h"
-
-#if defined(WEBRTC_IOS)
-#import "helpers/UIDevice+RTCDevice.h"
-#endif
-
-#include "modules/video_coding/include/video_error_codes.h"
-#include "rtc_base/checks.h"
-#include "rtc_base/logging.h"
-#include "rtc_base/time_utils.h"
-#include "sdk/objc/components/video_codec/nalu_rewriter.h"
-
-typedef void (^TGRTCVideoDecoderRequestKeyframeCallback)();
-
-// Struct that we pass to the decoder per frame to decode. We receive it again
-// in the decoder callback.
-struct RTCH265FrameDecodeParams {
-  RTCH265FrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts, TGRTCVideoDecoderRequestKeyframeCallback requestFrame)
-      : callback(cb), timestamp(ts), requestFrame(requestFrame) {}
-  RTCVideoDecoderCallback callback;
-  int64_t timestamp;
-  TGRTCVideoDecoderRequestKeyframeCallback requestFrame;
-};
-
-// This is the callback function that VideoToolbox calls when decode is
-// complete.
-static void tg_h265DecompressionOutputCallback(void* decoder,
-                                               void* params,
-                                               OSStatus status,
-                                               VTDecodeInfoFlags infoFlags,
-                                               CVImageBufferRef imageBuffer,
-                                               CMTime timestamp,
-                                               CMTime duration) {
-  std::unique_ptr<RTCH265FrameDecodeParams> decodeParams(
-      reinterpret_cast<RTCH265FrameDecodeParams*>(params));
-  if (status != noErr) {
-    RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
-    if (status == -12909) {
-      decodeParams->requestFrame();
-    }
-    return;
-  }
-  // TODO(tkchin): Handle CVO properly.
-  RTCCVPixelBuffer* frameBuffer =
-      [[RTCCVPixelBuffer alloc] initWithPixelBuffer:imageBuffer];
-  RTCVideoFrame* decodedFrame = [[RTCVideoFrame alloc]
-      initWithBuffer:frameBuffer
-            rotation:RTCVideoRotation_0
-         timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec];
-  decodedFrame.timeStamp = decodeParams->timestamp;
-  decodeParams->callback(decodedFrame);
-}
-
-@interface TGRTCVideoDecoderH265RequestKeyframeHolder : NSObject
-
-@property (nonatomic) NSLock *lock;
-@property (nonatomic) bool shouldRequestKeyframe;
-
-@end
-
-@implementation TGRTCVideoDecoderH265RequestKeyframeHolder
-
-- (instancetype)init {
-  self = [super init];
-  if (self != nil) {
-    _lock = [[NSLock alloc] init];
-  }
-  return self;
-}
-
-@end
-
-// Decoder.
-@implementation TGRTCVideoDecoderH265 {
-  CMVideoFormatDescriptionRef _videoFormat;
-  VTDecompressionSessionRef _decompressionSession;
-  RTCVideoDecoderCallback _callback;
-  TGRTCVideoDecoderH265RequestKeyframeHolder *_requestKeyframeHolder;
-  TGRTCVideoDecoderRequestKeyframeCallback _requestFrame;
-  OSStatus _error;
-}
-
-- (instancetype)init {
-  if (self = [super init]) {
-    _requestKeyframeHolder = [[TGRTCVideoDecoderH265RequestKeyframeHolder alloc] init];
-    TGRTCVideoDecoderH265RequestKeyframeHolder *requestKeyframeHolder = _requestKeyframeHolder;
-    _requestFrame = ^{
-      [requestKeyframeHolder.lock lock];
-      requestKeyframeHolder.shouldRequestKeyframe = true;
-      [requestKeyframeHolder.lock unlock];
-    };
-  }
-
-  return self;
-}
-
-- (void)dealloc {
-  [self destroyDecompressionSession];
-  [self setVideoFormat:nullptr];
-}
-
-- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores {
-  return WEBRTC_VIDEO_CODEC_OK;
-}
-
-- (NSInteger)decode:(RTCEncodedImage*)inputImage
-        missingFrames:(BOOL)missingFrames
-    codecSpecificInfo:(__nullable id<RTCCodecSpecificInfo>)info
-         renderTimeMs:(int64_t)renderTimeMs {
-  RTC_DCHECK(inputImage.buffer);
-
-  if (_error != noErr) {
-    RTC_LOG(LS_WARNING) << "Last frame decode failed.";
-    _error = noErr;
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-
-  rtc::ScopedCFTypeRef<CMVideoFormatDescriptionRef> inputFormat =
-      rtc::ScopedCF(webrtc::CreateH265VideoFormatDescription(
-          (uint8_t*)inputImage.buffer.bytes, inputImage.buffer.length));
-  if (inputFormat) {
-    CMVideoDimensions dimensions =
-        CMVideoFormatDescriptionGetDimensions(inputFormat.get());
-    RTC_LOG(LS_INFO) << "Resolution: " << dimensions.width << " x "
-                     << dimensions.height;
-    // Check if the video format has changed, and reinitialize decoder if
-    // needed.
-    if (!CMFormatDescriptionEqual(inputFormat.get(), _videoFormat)) {
-      [self setVideoFormat:inputFormat.get()];
-      int resetDecompressionSessionError = [self resetDecompressionSession];
-      if (resetDecompressionSessionError != WEBRTC_VIDEO_CODEC_OK) {
-        return resetDecompressionSessionError;
-      }
-    }
-  }
-  if (!_videoFormat) {
-    // We received a frame but we don't have format information so we can't
-    // decode it.
-    // This can happen after backgrounding. We need to wait for the next
-    // sps/pps before we can resume so we request a keyframe by returning an
-    // error.
-    RTC_LOG(LS_WARNING) << "Missing video format. Frame with sps/pps required.";
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-  CMSampleBufferRef sampleBuffer = nullptr;
-  if (!webrtc::H265AnnexBBufferToCMSampleBuffer(
-          (uint8_t*)inputImage.buffer.bytes, inputImage.buffer.length,
-          _videoFormat, &sampleBuffer)) {
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-  RTC_DCHECK(sampleBuffer);
-  VTDecodeFrameFlags decodeFlags =
-      kVTDecodeFrame_EnableAsynchronousDecompression;
-  std::unique_ptr<RTCH265FrameDecodeParams> frameDecodeParams;
-  frameDecodeParams.reset(
-      new RTCH265FrameDecodeParams(_callback, inputImage.timeStamp, _requestFrame));
-  OSStatus status = VTDecompressionSessionDecodeFrame(
-      _decompressionSession, sampleBuffer, decodeFlags,
-      frameDecodeParams.release(), nullptr);
-#if defined(WEBRTC_IOS)
-  // Re-initialize the decoder if we have an invalid session while the app is
-  // active and retry the decode request.
-  if (status == kVTInvalidSessionErr &&
-      [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) {
-    frameDecodeParams.reset(
-        new RTCH265FrameDecodeParams(_callback, inputImage.timeStamp, _requestFrame));
-    status = VTDecompressionSessionDecodeFrame(
-        _decompressionSession, sampleBuffer, decodeFlags,
-        frameDecodeParams.release(), nullptr);
-  }
-#endif
-  CFRelease(sampleBuffer);
-  if (status != noErr) {
-    RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status;
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-
-  bool requestKeyframe = false;
-
-  [_requestKeyframeHolder.lock lock];
-  if (_requestKeyframeHolder.shouldRequestKeyframe) {
-    _requestKeyframeHolder.shouldRequestKeyframe = false;
-    requestKeyframe = true;
-  }
-  [_requestKeyframeHolder.lock unlock];
-
-  if (requestKeyframe) {
-    RTC_LOG(LS_ERROR) << "Decoder asynchronously asked to request keyframe";
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-
-  return WEBRTC_VIDEO_CODEC_OK;
-}
-
-- (void)setCallback:(RTCVideoDecoderCallback)callback {
-  _callback = callback;
-}
-
-- (NSInteger)releaseDecoder {
-  // Need to invalidate the session so that callbacks no longer occur and it
-  // is safe to null out the callback.
-  [self destroyDecompressionSession];
-  [self setVideoFormat:nullptr];
-  _callback = nullptr;
-  return WEBRTC_VIDEO_CODEC_OK;
-}
-
-#pragma mark - Private
-
-- (int)resetDecompressionSession {
-  [self destroyDecompressionSession];
-
-  // Need to wait for the first SPS to initialize decoder.
-  if (!_videoFormat) {
-    return WEBRTC_VIDEO_CODEC_OK;
-  }
-
-  // Set keys for OpenGL and IOSurface compatibilty, which makes the encoder
-  // create pixel buffers with GPU backed memory. The intent here is to pass
-  // the pixel buffers directly so we avoid a texture upload later during
-  // rendering. This currently is moot because we are converting back to an
-  // I420 frame after decode, but eventually we will be able to plumb
-  // CVPixelBuffers directly to the renderer.
-  // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that that
-  // we can pass CVPixelBuffers as native handles in decoder output.
-  static size_t const attributesSize = 3;
-  CFTypeRef keys[attributesSize] = {
-#if defined(WEBRTC_IOS)
-    kCVPixelBufferOpenGLESCompatibilityKey,
-#elif defined(WEBRTC_MAC)
-    kCVPixelBufferOpenGLCompatibilityKey,
-#endif
-    kCVPixelBufferIOSurfacePropertiesKey,
-    kCVPixelBufferPixelFormatTypeKey
-  };
-  CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
-  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
-  CFNumberRef pixelFormat =
-      CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
-  CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue,
-                                      pixelFormat};
-  CFDictionaryRef attributes =
-      CreateCFTypeDictionary(keys, values, attributesSize);
-  if (ioSurfaceValue) {
-    CFRelease(ioSurfaceValue);
-    ioSurfaceValue = nullptr;
-  }
-  if (pixelFormat) {
-    CFRelease(pixelFormat);
-    pixelFormat = nullptr;
-  }
-  VTDecompressionOutputCallbackRecord record = {
-      tg_h265DecompressionOutputCallback,
-      nullptr,
-  };
-  OSStatus status =
-      VTDecompressionSessionCreate(nullptr, _videoFormat, nullptr, attributes,
-                                   &record, &_decompressionSession);
-  CFRelease(attributes);
-  if (status != noErr) {
-    [self destroyDecompressionSession];
-    return WEBRTC_VIDEO_CODEC_ERROR;
-  }
-  [self configureDecompressionSession];
-
-  return WEBRTC_VIDEO_CODEC_OK;
-}
-
-- (void)configureDecompressionSession {
-  RTC_DCHECK(_decompressionSession);
-#if defined(WEBRTC_IOS)
-  // VTSessionSetProperty(_decompressionSession,
-  // kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue);
-#endif
-}
-
-- (void)destroyDecompressionSession {
-  if (_decompressionSession) {
-#if defined(WEBRTC_IOS)
-    if ([UIDevice isIOS11OrLater]) {
-      VTDecompressionSessionWaitForAsynchronousFrames(_decompressionSession);
-    }
-#endif
-    VTDecompressionSessionInvalidate(_decompressionSession);
-    CFRelease(_decompressionSession);
-    _decompressionSession = nullptr;
-  }
-}
-
-- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat {
-  if (_videoFormat == videoFormat) {
-    return;
-  }
-  if (_videoFormat) {
-    CFRelease(_videoFormat);
-  }
-  _videoFormat = videoFormat;
-  if (_videoFormat) {
-    CFRetain(_videoFormat);
-  }
-}
-
-- (NSString*)implementationName {
-  return @"VideoToolbox";
-}
-
-@end
@@ -1,23 +0,0 @@
-/*
- * Copyright 2017 The WebRTC project authors. All Rights Reserved.
- *
- * Use of this source code is governed by a BSD-style license
- * that can be found in the LICENSE file in the root of the source
- * tree. An additional intellectual property rights grant can be found
- * in the file PATENTS. All contributing project authors may
- * be found in the AUTHORS file in the root of the source tree.
- */
-
-#import <Foundation/Foundation.h>
-
-#import "RTCMacros.h"
-#import "RTCVideoCodecInfo.h"
-#import "RTCVideoEncoder.h"
-
-RTC_OBJC_EXPORT
-API_AVAILABLE(ios(11.0))
-@interface TGRTCVideoEncoderH265 : NSObject <RTCVideoEncoder>
-
-- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo;
-
-@end
@ -1,613 +0,0 @@
|
|||||||
/*
|
|
||||||
* Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
|
|
||||||
*
|
|
||||||
* Use of this source code is governed by a BSD-style license
|
|
||||||
* that can be found in the LICENSE file in the root of the source
|
|
||||||
* tree. An additional intellectual property rights grant can be found
|
|
||||||
* in the file PATENTS. All contributing project authors may
|
|
||||||
* be found in the AUTHORS file in the root of the source tree.
|
|
||||||
*
|
|
||||||
*/
|
|
||||||
|
|
||||||
#import "TGRTCVideoEncoderH265.h"
|
|
||||||
|
|
||||||
#import <VideoToolbox/VideoToolbox.h>
|
|
||||||
#include <vector>
|
|
||||||
|
|
||||||
#import "RTCCodecSpecificInfoH265.h"
|
|
||||||
#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h"
|
|
||||||
#import "api/peerconnection/RTCVideoCodecInfo+Private.h"
|
|
||||||
#import "base/RTCI420Buffer.h"
|
|
||||||
#import "base/RTCVideoFrame.h"
|
|
||||||
#import "base/RTCVideoFrameBuffer.h"
|
|
||||||
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
|
|
||||||
#import "helpers.h"
|
|
||||||
#if defined(WEBRTC_IOS)
|
|
||||||
#import "helpers/UIDevice+RTCDevice.h"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#include "common_video/h264/profile_level_id.h"
|
|
||||||
#include "common_video/h265/h265_bitstream_parser.h"
|
|
||||||
#include "common_video/include/bitrate_adjuster.h"
|
|
||||||
#include "libyuv/convert_from.h"
|
|
||||||
#include "modules/include/module_common_types.h"
|
|
||||||
#include "modules/video_coding/include/video_error_codes.h"
|
|
||||||
#include "rtc_base/buffer.h"
|
|
||||||
#include "rtc_base/logging.h"
|
|
||||||
#include "rtc_base/time_utils.h"
|
|
||||||
#include "sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h"
|
|
||||||
#include "system_wrappers/include/clock.h"
|
|
||||||
|
|
||||||
@interface TGRTCVideoEncoderH265 ()
|
|
||||||
|
|
||||||
- (void)frameWasEncoded:(OSStatus)status
|
|
||||||
flags:(VTEncodeInfoFlags)infoFlags
|
|
||||||
sampleBuffer:(CMSampleBufferRef)sampleBuffer
|
|
||||||
width:(int32_t)width
|
|
||||||
height:(int32_t)height
|
|
||||||
renderTimeMs:(int64_t)renderTimeMs
|
|
||||||
timestamp:(uint32_t)timestamp
|
|
||||||
rotation:(RTCVideoRotation)rotation;
|
|
||||||
|
|
||||||
@end
|
|
||||||
|
|
||||||
namespace { // anonymous namespace
|
|
||||||
|
|
||||||
// The ratio between kVTCompressionPropertyKey_DataRateLimits and
|
|
||||||
// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher
|
|
||||||
// than the average bit rate to avoid undershooting the target.
|
|
||||||
const float kLimitToAverageBitRateFactor = 1.5f;
|
|
||||||
// These thresholds deviate from the default h265 QP thresholds, as they
|
|
||||||
// have been found to work better on devices that support VideoToolbox
|
|
||||||
const int kLowh265QpThreshold = 28;
|
|
||||||
const int kHighh265QpThreshold = 39;
|
|
||||||
|
|
||||||
// Struct that we pass to the encoder per frame to encode. We receive it again
|
|
||||||
// in the encoder callback.
|
|
||||||
struct API_AVAILABLE(ios(11.0)) RTCFrameEncodeParams {
|
|
||||||
RTCFrameEncodeParams(TGRTCVideoEncoderH265* e,
|
|
||||||
int32_t w,
|
|
||||||
int32_t h,
|
|
||||||
int64_t rtms,
|
|
||||||
uint32_t ts,
|
|
||||||
RTCVideoRotation r)
|
|
||||||
: encoder(e),
|
|
||||||
width(w),
|
|
||||||
height(h),
|
|
||||||
render_time_ms(rtms),
|
|
||||||
timestamp(ts),
|
|
||||||
rotation(r) {}
|
|
||||||
|
|
||||||
TGRTCVideoEncoderH265* encoder;
|
|
||||||
int32_t width;
|
|
||||||
int32_t height;
|
|
||||||
int64_t render_time_ms;
|
|
||||||
uint32_t timestamp;
|
|
||||||
RTCVideoRotation rotation;
|
|
||||||
};
|
|
||||||
|
|
||||||
// We receive I420Frames as input, but we need to feed CVPixelBuffers into the
|
|
||||||
// encoder. This performs the copy and format conversion.
|
|
||||||
// TODO(tkchin): See if encoder will accept i420 frames and compare performance.
|
|
||||||
bool CopyVideoFrameToPixelBuffer(id<RTCI420Buffer> frameBuffer,
|
|
||||||
CVPixelBufferRef pixelBuffer) {
|
|
||||||
RTC_DCHECK(pixelBuffer);
|
|
||||||
RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer),
|
|
||||||
kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);
|
|
||||||
RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0),
|
|
||||||
frameBuffer.height);
|
|
||||||
RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0),
|
|
||||||
frameBuffer.width);
|
|
||||||
|
|
||||||
CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
|
|
||||||
if (cvRet != kCVReturnSuccess) {
|
|
||||||
RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
|
|
||||||
uint8_t* dstY = reinterpret_cast<uint8_t*>(
|
|
||||||
CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0));
|
|
||||||
int dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);
|
|
||||||
uint8_t* dstUV = reinterpret_cast<uint8_t*>(
|
|
||||||
CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1));
|
|
||||||
int dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1);
|
|
||||||
// Convert I420 to NV12.
|
|
||||||
int ret = libyuv::I420ToNV12(
|
|
||||||
frameBuffer.dataY, frameBuffer.strideY, frameBuffer.dataU,
|
|
||||||
frameBuffer.strideU, frameBuffer.dataV, frameBuffer.strideV, dstY,
|
|
||||||
dstStrideY, dstUV, dstStrideUV, frameBuffer.width, frameBuffer.height);
|
|
||||||
CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
|
|
||||||
if (ret) {
|
|
||||||
RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret;
|
|
||||||
return false;
|
|
||||||
}
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) {
|
|
||||||
if (!pixel_buffer_pool) {
|
|
||||||
RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool.";
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
CVPixelBufferRef pixel_buffer;
|
|
||||||
CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool,
|
|
||||||
&pixel_buffer);
|
|
||||||
if (ret != kCVReturnSuccess) {
|
|
||||||
RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret;
|
|
||||||
// We probably want to drop frames here, since failure probably means
|
|
||||||
// that the pool is empty.
|
|
||||||
return nullptr;
|
|
||||||
}
|
|
||||||
return pixel_buffer;
|
|
||||||
}
|
|
||||||
|
|
||||||
// This is the callback function that VideoToolbox calls when encode is
|
|
||||||
// complete. From inspection this happens on its own queue.
|
|
||||||
void compressionOutputCallback(void* encoder,
|
|
||||||
void* params,
|
|
||||||
OSStatus status,
|
|
||||||
VTEncodeInfoFlags infoFlags,
|
|
||||||
CMSampleBufferRef sampleBuffer)
|
|
||||||
API_AVAILABLE(ios(11.0)) {
|
|
||||||
RTC_CHECK(params);
|
|
||||||
std::unique_ptr<RTCFrameEncodeParams> encodeParams(
|
|
||||||
reinterpret_cast<RTCFrameEncodeParams*>(params));
|
|
||||||
RTC_CHECK(encodeParams->encoder);
|
|
||||||
[encodeParams->encoder frameWasEncoded:status
|
|
||||||
flags:infoFlags
|
|
||||||
sampleBuffer:sampleBuffer
|
|
||||||
width:encodeParams->width
|
|
||||||
height:encodeParams->height
|
|
||||||
renderTimeMs:encodeParams->render_time_ms
|
|
||||||
timestamp:encodeParams->timestamp
|
|
||||||
rotation:encodeParams->rotation];
|
|
||||||
}
|
|
||||||
} // namespace
|
|
||||||
|
|
||||||
@implementation TGRTCVideoEncoderH265 {
  RTCVideoCodecInfo* _codecInfo;
  std::unique_ptr<webrtc::BitrateAdjuster> _bitrateAdjuster;
  uint32_t _targetBitrateBps;
  uint32_t _encoderBitrateBps;
  CFStringRef _profile;
  RTCVideoEncoderCallback _callback;
  int32_t _width;
  int32_t _height;
  VTCompressionSessionRef _compressionSession;
  RTCVideoCodecMode _mode;
  int framesLeft;

  webrtc::H265BitstreamParser _h265BitstreamParser;
  std::vector<uint8_t> _nv12ScaleBuffer;
}

// .5 is set as a minimum to prevent overcompensating for large temporary
// overshoots. We don't want to degrade video quality too badly.
// .95 is set to prevent oscillations. When a lower bitrate is set on the
// encoder than previously set, its output seems to have a brief period of
// drastically reduced bitrate, so we want to avoid that. In steady state
// conditions, 0.95 seems to give us better overall bitrate over long periods
// of time.
- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo*)codecInfo {
  if (self = [super init]) {
    _codecInfo = codecInfo;
    _bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95));
    RTC_CHECK([codecInfo.name isEqualToString:@"H265"]);
  }
  return self;
}

- (void)dealloc {
  [self destroyCompressionSession];
}

- (NSInteger)startEncodeWithSettings:(RTCVideoEncoderSettings*)settings
                       numberOfCores:(int)numberOfCores {
  RTC_DCHECK(settings);
  RTC_DCHECK([settings.name isEqualToString:@"H265"]);

  _width = settings.width;
  _height = settings.height;
  _mode = settings.mode;

  // We can only set average bitrate on the HW encoder.
  _targetBitrateBps = settings.startBitrate;
  _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);

  // TODO(tkchin): Try setting payload size via
  // kVTCompressionPropertyKey_Maxh265SliceBytes.

  return [self resetCompressionSession];
}

- (NSInteger)encode:(RTCVideoFrame*)frame
    codecSpecificInfo:(id<RTCCodecSpecificInfo>)codecSpecificInfo
           frameTypes:(NSArray<NSNumber*>*)frameTypes {
  RTC_DCHECK_EQ(frame.width, _width);
  RTC_DCHECK_EQ(frame.height, _height);
  if (!_callback || !_compressionSession) {
    return WEBRTC_VIDEO_CODEC_UNINITIALIZED;
  }
  BOOL isKeyframeRequired = NO;

  // Get a pixel buffer from the pool and copy frame data over.
  CVPixelBufferPoolRef pixelBufferPool =
      VTCompressionSessionGetPixelBufferPool(_compressionSession);

#if defined(WEBRTC_IOS)
  if (!pixelBufferPool) {
    // Kind of a hack. On backgrounding, the compression session seems to get
    // invalidated, which causes this pool call to fail when the application
    // is foregrounded and frames are being sent for encoding again.
    // Resetting the session when this happens fixes the issue.
    // In addition we request a keyframe so video can recover quickly.
    [self resetCompressionSession];
    pixelBufferPool =
        VTCompressionSessionGetPixelBufferPool(_compressionSession);
    isKeyframeRequired = YES;
    RTC_LOG(LS_INFO) << "Resetting compression session due to invalid pool.";
  }
#endif

  CVPixelBufferRef pixelBuffer = nullptr;
  if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
    // Native frame buffer
    RTCCVPixelBuffer* rtcPixelBuffer = (RTCCVPixelBuffer*)frame.buffer;
    if (![rtcPixelBuffer requiresCropping]) {
      // This pixel buffer might have a higher resolution than what the
      // compression session is configured to. The compression session can
      // handle that and will output encoded frames in the configured
      // resolution regardless of the input pixel buffer resolution.
      pixelBuffer = rtcPixelBuffer.pixelBuffer;
      CVBufferRetain(pixelBuffer);
    } else {
      // Cropping required, we need to crop and scale to a new pixel buffer.
      pixelBuffer = CreatePixelBuffer(pixelBufferPool);
      if (!pixelBuffer) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
      int dstWidth = CVPixelBufferGetWidth(pixelBuffer);
      int dstHeight = CVPixelBufferGetHeight(pixelBuffer);
      if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) {
        int size =
            [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth
                                                            height:dstHeight];
        _nv12ScaleBuffer.resize(size);
      } else {
        _nv12ScaleBuffer.clear();
      }
      _nv12ScaleBuffer.shrink_to_fit();
      if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer
                           withTempBuffer:_nv12ScaleBuffer.data()]) {
        return WEBRTC_VIDEO_CODEC_ERROR;
      }
    }
  }

  if (!pixelBuffer) {
    // We did not have a native frame buffer
    pixelBuffer = CreatePixelBuffer(pixelBufferPool);
    if (!pixelBuffer) {
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
    RTC_DCHECK(pixelBuffer);
    if (!CopyVideoFrameToPixelBuffer([frame.buffer toI420], pixelBuffer)) {
      RTC_LOG(LS_ERROR) << "Failed to copy frame data.";
      CVBufferRelease(pixelBuffer);
      return WEBRTC_VIDEO_CODEC_ERROR;
    }
  }

  // Check if we need a keyframe.
  if (!isKeyframeRequired && frameTypes) {
    for (NSNumber* frameType in frameTypes) {
      if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) {
        isKeyframeRequired = YES;
        break;
      }
    }
  }
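
  // frame.timeStampNs is the capture time in nanoseconds; dividing by
  // rtc::kNumNanosecsPerMillisec (1e6) yields milliseconds, which is then
  // expressed as a CMTime on a 1000 Hz timescale for VideoToolbox.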
  CMTime presentationTimeStamp =
      CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000);
  CFDictionaryRef frameProperties = nullptr;
  if (isKeyframeRequired) {
    CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame};
    CFTypeRef values[] = {kCFBooleanTrue};
    frameProperties = CreateCFTypeDictionary(keys, values, 1);
  }
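
  // The frameProperties dictionary above applies only to the single
  // VTCompressionSessionEncodeFrame call below; the ForceKeyFrame option makes
  // VideoToolbox emit a keyframe immediately instead of waiting for the
  // configured keyframe interval.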

  std::unique_ptr<RTCFrameEncodeParams> encodeParams;
  encodeParams.reset(new RTCFrameEncodeParams(
      self, _width, _height, frame.timeStampNs / rtc::kNumNanosecsPerMillisec,
      frame.timeStamp, frame.rotation));

  // Update the bitrate if needed.
  [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()];

  OSStatus status = VTCompressionSessionEncodeFrame(
      _compressionSession, pixelBuffer, presentationTimeStamp, kCMTimeInvalid,
      frameProperties, encodeParams.release(), nullptr);
  if (frameProperties) {
    CFRelease(frameProperties);
  }
  if (pixelBuffer) {
    CVBufferRelease(pixelBuffer);
  }
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)setCallback:(RTCVideoEncoderCallback)callback {
  _callback = callback;
}

- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate {
  _targetBitrateBps = 1000 * bitrateKbit;
  _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps);
  [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()];
  return WEBRTC_VIDEO_CODEC_OK;
}

#pragma mark - Private

- (NSInteger)releaseEncoder {
  // Need to destroy so that the session is invalidated and won't use the
  // callback anymore. Do not remove callback until the session is invalidated
  // since async encoder callbacks can occur until invalidation.
  [self destroyCompressionSession];
  _callback = nullptr;
  return WEBRTC_VIDEO_CODEC_OK;
}

- (int)resetCompressionSession {
  [self destroyCompressionSession];

  // Set source image buffer attributes. These attributes will be present on
  // buffers retrieved from the encoder's pixel buffer pool.
  const size_t attributesSize = 3;
  CFTypeRef keys[attributesSize] = {
#if defined(WEBRTC_IOS)
    kCVPixelBufferOpenGLESCompatibilityKey,
#elif defined(WEBRTC_MAC)
    kCVPixelBufferOpenGLCompatibilityKey,
#endif
    kCVPixelBufferIOSurfacePropertiesKey,
    kCVPixelBufferPixelFormatTypeKey
  };
  CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0);
  int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange;
  CFNumberRef pixelFormat =
      CFNumberCreate(nullptr, kCFNumberLongType, &nv12type);
  CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue,
                                      pixelFormat};
  CFDictionaryRef sourceAttributes =
      CreateCFTypeDictionary(keys, values, attributesSize);
  if (ioSurfaceValue) {
    CFRelease(ioSurfaceValue);
    ioSurfaceValue = nullptr;
  }
  if (pixelFormat) {
    CFRelease(pixelFormat);
    pixelFormat = nullptr;
  }
  CFMutableDictionaryRef encoder_specs = nullptr;
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
  // Currently hw accl is supported above 360p on mac, below 360p
  // the compression session will be created with hw accl disabled.
  encoder_specs =
      CFDictionaryCreateMutable(nullptr, 1, &kCFTypeDictionaryKeyCallBacks,
                                &kCFTypeDictionaryValueCallBacks);
  CFDictionarySetValue(
      encoder_specs,
      kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder,
      kCFBooleanTrue);
#endif
  OSStatus status = VTCompressionSessionCreate(
      nullptr,  // use default allocator
      _width, _height, kCMVideoCodecType_HEVC,
      encoder_specs,  // use hardware accelerated encoder if available
      sourceAttributes,
      nullptr,  // use default compressed data allocator
      compressionOutputCallback, nullptr, &_compressionSession);
  if (sourceAttributes) {
    CFRelease(sourceAttributes);
    sourceAttributes = nullptr;
  }
  if (encoder_specs) {
    CFRelease(encoder_specs);
    encoder_specs = nullptr;
  }
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status;
    return WEBRTC_VIDEO_CODEC_ERROR;
  }
#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS)
  CFBooleanRef hwaccl_enabled = nullptr;
  status = VTSessionCopyProperty(
      _compressionSession,
      kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder, nullptr,
      &hwaccl_enabled);
  if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) {
    RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled";
  } else {
    RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled";
  }
#endif
  [self configureCompressionSession];
  return WEBRTC_VIDEO_CODEC_OK;
}

- (void)configureCompressionSession {
  RTC_DCHECK(_compressionSession);
  SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime,
                       false);
  // SetVTSessionProperty(_compressionSession,
  //                      kVTCompressionPropertyKey_ProfileLevel, _profile);
  SetVTSessionProperty(_compressionSession,
                       kVTCompressionPropertyKey_AllowFrameReordering, false);
  [self setEncoderBitrateBps:_targetBitrateBps];
  // TODO(tkchin): Look at entropy mode and colorspace matrices.
  // TODO(tkchin): Investigate to see if there's any way to make this work.
  // May need it to interop with Android. Currently this call just fails.
  // On inspecting encoder output on iOS8, this value is set to 6.
  // internal::SetVTSessionProperty(compression_session_,
  //                                kVTCompressionPropertyKey_MaxFrameDelayCount,
  //                                1);

  // Set a relatively large value for keyframe emission (7200 frames or 4
  // minutes).
  SetVTSessionProperty(_compressionSession,
                       kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200);
  SetVTSessionProperty(_compressionSession,
                       kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration,
                       240);
  OSStatus status =
      VTCompressionSessionPrepareToEncodeFrames(_compressionSession);
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "Compression session failed to prepare encode frames.";
  }
}

- (void)destroyCompressionSession {
  if (_compressionSession) {
    VTCompressionSessionInvalidate(_compressionSession);
    CFRelease(_compressionSession);
    _compressionSession = nullptr;
  }
}

- (NSString*)implementationName {
  return @"VideoToolbox";
}

- (void)setBitrateBps:(uint32_t)bitrateBps {
  if (_encoderBitrateBps != bitrateBps) {
    [self setEncoderBitrateBps:bitrateBps];
  }
}

- (void)setEncoderBitrateBps:(uint32_t)bitrateBps {
  if (_compressionSession) {
    SetVTSessionProperty(_compressionSession,
                         kVTCompressionPropertyKey_AverageBitRate, bitrateBps);
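
    // DataRateLimits below caps short-term overshoot above the average
    // bitrate: at most bitrateBps * kLimitToAverageBitRateFactor / 8 bytes
    // may be produced within any one-second window.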

    // TODO(tkchin): Add a helper method to set array value.
    int64_t dataLimitBytesPerSecondValue =
        static_cast<int64_t>(bitrateBps * kLimitToAverageBitRateFactor / 8);
    CFNumberRef bytesPerSecond =
        CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type,
                       &dataLimitBytesPerSecondValue);
    int64_t oneSecondValue = 1;
    CFNumberRef oneSecond = CFNumberCreate(
        kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue);
    const void* nums[2] = {bytesPerSecond, oneSecond};
    CFArrayRef dataRateLimits =
        CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks);
    OSStatus status = VTSessionSetProperty(
        _compressionSession, kVTCompressionPropertyKey_DataRateLimits,
        dataRateLimits);
    if (bytesPerSecond) {
      CFRelease(bytesPerSecond);
    }
    if (oneSecond) {
      CFRelease(oneSecond);
    }
    if (dataRateLimits) {
      CFRelease(dataRateLimits);
    }
    if (status != noErr) {
      RTC_LOG(LS_ERROR) << "Failed to set data rate limit";
    }

    _encoderBitrateBps = bitrateBps;
  }
}

- (void)frameWasEncoded:(OSStatus)status
                  flags:(VTEncodeInfoFlags)infoFlags
           sampleBuffer:(CMSampleBufferRef)sampleBuffer
                  width:(int32_t)width
                 height:(int32_t)height
           renderTimeMs:(int64_t)renderTimeMs
              timestamp:(uint32_t)timestamp
               rotation:(RTCVideoRotation)rotation {
  if (status != noErr) {
    RTC_LOG(LS_ERROR) << "h265 encode failed.";
    return;
  }
  if (infoFlags & kVTEncodeInfo_FrameDropped) {
    RTC_LOG(LS_INFO) << "h265 encoder dropped a frame.";
    return;
  }

  BOOL isKeyframe = NO;
  CFArrayRef attachments =
      CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0);
  if (attachments != nullptr && CFArrayGetCount(attachments)) {
    CFDictionaryRef attachment =
        static_cast<CFDictionaryRef>(CFArrayGetValueAtIndex(attachments, 0));
    isKeyframe =
        !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync);
  }

  if (isKeyframe) {
    RTC_LOG(LS_INFO) << "Generated keyframe";
  }

  // Convert the sample buffer into a buffer suitable for RTP packetization.
  // TODO(tkchin): Allocate buffers through a pool.
  std::unique_ptr<rtc::Buffer> buffer(new rtc::Buffer());
  RTCRtpFragmentationHeader* header;
  {
    std::unique_ptr<webrtc::RTPFragmentationHeader> header_cpp;
    bool result = H265CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe,
                                                   buffer.get(), &header_cpp);
    header = [[RTCRtpFragmentationHeader alloc]
        initWithNativeFragmentationHeader:header_cpp.get()];
    if (!result) {
      RTC_LOG(LS_ERROR) << "Failed to convert sample buffer.";
      return;
    }
  }
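
  // The conversion above, mirroring the analogous H.264 helper, rewrites the
  // length-prefixed NAL units that VideoToolbox produces into Annex B
  // start-code format and records the NALU boundaries in the fragmentation
  // header for the RTP packetizer.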

  RTCEncodedImage* frame = [[RTCEncodedImage alloc] init];
  frame.buffer = [NSData dataWithBytesNoCopy:buffer->data()
                                      length:buffer->size()
                                freeWhenDone:NO];
  frame.encodedWidth = width;
  frame.encodedHeight = height;
  frame.completeFrame = YES;
  frame.frameType =
      isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta;
  frame.captureTimeMs = renderTimeMs;
  frame.timeStamp = timestamp;
  frame.rotation = rotation;
  frame.contentType = (_mode == RTCVideoCodecModeScreensharing)
                          ? RTCVideoContentTypeScreenshare
                          : RTCVideoContentTypeUnspecified;
  frame.flags = webrtc::VideoSendTiming::kInvalid;

  int qp;
  _h265BitstreamParser.ParseBitstream(buffer->data(), buffer->size());
  _h265BitstreamParser.GetLastSliceQp(&qp);
  frame.qp = @(qp);

  BOOL res = _callback(frame, [[RTCCodecSpecificInfoH265 alloc] init], header);
  if (!res) {
    RTC_LOG(LS_ERROR) << "Encode callback failed.";
    return;
  }
  _bitrateAdjuster->Update(frame.buffer.length);
}

- (RTCVideoEncoderQpThresholds*)scalingSettings {
  return [[RTCVideoEncoderQpThresholds alloc]
      initWithThresholdsLow:kLowh265QpThreshold
                       high:kHighh265QpThreshold];
}

@end
@ -1,31 +0,0 @@
#ifndef CODECS_APPLE_H
#define CODECS_APPLE_H

#include "rtc_base/thread.h"
#include "api/video_codecs/video_encoder_factory.h"
#include "api/video_codecs/video_decoder_factory.h"
#include "api/media_stream_interface.h"

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class VideoCapturerInterface {
public:
    virtual ~VideoCapturerInterface();

    virtual void setIsEnabled(bool isEnabled) = 0;
};

void configurePlatformAudio();
std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory();
std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory();
bool supportsH265Encoding();
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread);
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(bool)> isActiveUpdated);

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
@ -1,224 +0,0 @@
#import "CodecsApple.h"

#include "absl/strings/match.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "api/rtp_parameters.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/webrtc_media_engine.h"
#include "modules/audio_device/include/audio_device_default.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/field_trial.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/video_bitrate_allocation.h"

#include "Apple/TGRTCDefaultVideoEncoderFactory.h"
#include "Apple/TGRTCDefaultVideoDecoderFactory.h"
#include "sdk/objc/native/api/video_encoder_factory.h"
#include "sdk/objc/native/api/video_decoder_factory.h"

#include "sdk/objc/native/src/objc_video_track_source.h"
#include "api/video_track_source_proxy.h"
#include "sdk/objc/api/RTCVideoRendererAdapter.h"
#include "sdk/objc/native/api/video_frame.h"
#if defined(WEBRTC_IOS)
#include "sdk/objc/components/audio/RTCAudioSession.h"
#endif
#include "api/media_types.h"

#import "VideoCameraCapturer.h"

#import <AVFoundation/AVFoundation.h>

@interface VideoCapturerInterfaceImplReference : NSObject {
    VideoCameraCapturer *_videoCapturer;
}

@end

@implementation VideoCapturerInterfaceImplReference

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source useFrontCamera:(bool)useFrontCamera isActiveUpdated:(void (^)(bool))isActiveUpdated {
    self = [super init];
    if (self != nil) {
        assert([NSThread isMainThread]);

        _videoCapturer = [[VideoCameraCapturer alloc] initWithSource:source isActiveUpdated:isActiveUpdated];

        AVCaptureDevice *selectedCamera = nil;

#if TARGET_OS_IOS
        AVCaptureDevice *frontCamera = nil;
        AVCaptureDevice *backCamera = nil;
        for (AVCaptureDevice *device in [VideoCameraCapturer captureDevices]) {
            if (device.position == AVCaptureDevicePositionFront) {
                frontCamera = device;
            } else if (device.position == AVCaptureDevicePositionBack) {
                backCamera = device;
            }
        }
        if (useFrontCamera && frontCamera != nil) {
            selectedCamera = frontCamera;
        } else {
            selectedCamera = backCamera;
        }
#else
        selectedCamera = [VideoCameraCapturer captureDevices].firstObject;
#endif
        // NSLog(@"%@", selectedCamera);
        if (selectedCamera == nil) {
            return nil;
        }

        NSArray<AVCaptureDeviceFormat *> *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:selectedCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) {
            int32_t width1 = CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width;
            int32_t width2 = CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width;
            return width1 < width2 ? NSOrderedAscending : NSOrderedDescending;
        }];

        AVCaptureDeviceFormat *bestFormat = sortedFormats.firstObject;
        for (AVCaptureDeviceFormat *format in sortedFormats) {
            CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription);
            if (dimensions.width >= 1000 || dimensions.height >= 1000) {
                bestFormat = format;
                break;
            }
        }

        if (bestFormat == nil) {
            assert(false);
            return nil;
        }

        AVFrameRateRange *frameRateRange = [[bestFormat.videoSupportedFrameRateRanges sortedArrayUsingComparator:^NSComparisonResult(AVFrameRateRange *lhs, AVFrameRateRange *rhs) {
            if (lhs.maxFrameRate < rhs.maxFrameRate) {
                return NSOrderedAscending;
            } else {
                return NSOrderedDescending;
            }
        }] lastObject];

        if (frameRateRange == nil) {
            assert(false);
            return nil;
        }

        [_videoCapturer startCaptureWithDevice:selectedCamera format:bestFormat fps:30];
    }
    return self;
}

- (void)dealloc {
    assert([NSThread isMainThread]);

    [_videoCapturer stopCapture];
}

- (void)setIsEnabled:(bool)isEnabled {
    [_videoCapturer setIsEnabled:isEnabled];
}

@end

@interface VideoCapturerInterfaceImplHolder : NSObject

@property (nonatomic) void *reference;

@end

@implementation VideoCapturerInterfaceImplHolder

@end

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class VideoCapturerInterfaceImpl: public VideoCapturerInterface {
public:
    VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(bool)> isActiveUpdated) :
    _source(source) {
        _implReference = [[VideoCapturerInterfaceImplHolder alloc] init];
        VideoCapturerInterfaceImplHolder *implReference = _implReference;
        dispatch_async(dispatch_get_main_queue(), ^{
            VideoCapturerInterfaceImplReference *value = [[VideoCapturerInterfaceImplReference alloc] initWithSource:source useFrontCamera:useFrontCamera isActiveUpdated:^(bool isActive) {
                isActiveUpdated(isActive);
            }];
            if (value != nil) {
                implReference.reference = (void *)CFBridgingRetain(value);
            }
        });
    }

    virtual ~VideoCapturerInterfaceImpl() {
        VideoCapturerInterfaceImplHolder *implReference = _implReference;
        dispatch_async(dispatch_get_main_queue(), ^{
            if (implReference.reference != nil) {
                CFBridgingRelease(implReference.reference);
            }
        });
    }

    virtual void setIsEnabled(bool isEnabled) {
        VideoCapturerInterfaceImplHolder *implReference = _implReference;
        dispatch_async(dispatch_get_main_queue(), ^{
            if (implReference.reference != nil) {
                VideoCapturerInterfaceImplReference *reference = (__bridge VideoCapturerInterfaceImplReference *)implReference.reference;
                [reference setIsEnabled:isEnabled];
            }
        });
    }

private:
    rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;
    VideoCapturerInterfaceImplHolder *_implReference;
};
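
// The holder indirection above exists because the underlying capturer must be
// created, toggled and torn down on the main queue: CFBridgingRetain/Release
// keep the Objective-C reference alive across those asynchronous hops while
// the C++ wrapper itself stays non-ARC.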

VideoCapturerInterface::~VideoCapturerInterface() {
}

void configurePlatformAudio() {
    //[RTCAudioSession sharedInstance].useManualAudio = true;
    //[RTCAudioSession sharedInstance].isAudioEnabled = true;
}

std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory() {
    return webrtc::ObjCToNativeVideoEncoderFactory([[TGRTCDefaultVideoEncoderFactory alloc] init]);
}

std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory() {
    return webrtc::ObjCToNativeVideoDecoderFactory([[TGRTCDefaultVideoDecoderFactory alloc] init]);
}

bool supportsH265Encoding() {
#if TARGET_OS_IOS
    if (@available(iOS 11.0, *)) {
        return [[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality];
    } else {
        return false;
    }
#else
    if (@available(macOS 10.13, *)) {
        return [[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality];
    } else {
        return false;
    }
#endif
}
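
// Heuristic: the HEVC export preset is only advertised on OS versions and
// hardware that can actually encode H.265, so its presence is used here as a
// proxy for VideoToolbox H.265 encode support.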

rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) {
    rtc::scoped_refptr<webrtc::ObjCVideoTrackSource> objCVideoTrackSource(new rtc::RefCountedObject<webrtc::ObjCVideoTrackSource>());
    return webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, objCVideoTrackSource);
}

std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera, std::function<void(bool)> isActiveUpdated) {
    return std::make_unique<VideoCapturerInterfaceImpl>(source, useFrontCamera, isActiveUpdated);
}

#ifdef TGVOIP_NAMESPACE
}
#endif
@ -1,266 +0,0 @@
#include "Manager.h"

#include "rtc_base/byte_buffer.h"

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

static rtc::Thread *makeNetworkThread() {
    static std::unique_ptr<rtc::Thread> value = rtc::Thread::CreateWithSocketServer();
    value->SetName("WebRTC-Network", nullptr);
    value->Start();
    return value.get();
}

static rtc::Thread *getNetworkThread() {
    static rtc::Thread *value = makeNetworkThread();
    return value;
}

static rtc::Thread *makeMediaThread() {
    static std::unique_ptr<rtc::Thread> value = rtc::Thread::Create();
    value->SetName("WebRTC-Media", nullptr);
    value->Start();
    return value.get();
}

rtc::Thread *Manager::getMediaThread() {
    static rtc::Thread *value = makeMediaThread();
    return value;
}

Manager::Manager(
    rtc::Thread *thread,
    TgVoipEncryptionKey encryptionKey,
    bool enableP2P,
    std::vector<TgVoipRtcServer> const &rtcServers,
    std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
    std::function<void (const TgVoipState &, VideoState)> stateUpdated,
    std::function<void (bool)> remoteVideoIsActiveUpdated,
    std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
) :
_thread(thread),
_encryptionKey(encryptionKey),
_enableP2P(enableP2P),
_rtcServers(rtcServers),
_videoCapture(videoCapture),
_stateUpdated(stateUpdated),
_remoteVideoIsActiveUpdated(remoteVideoIsActiveUpdated),
_signalingDataEmitted(signalingDataEmitted),
_state(TgVoipState::Reconnecting),
_videoState(VideoState::possible),
_didConnectOnce(false) {
    assert(_thread->IsCurrent());
    if (videoCapture != nullptr) {
        _videoState = VideoState::outgoingRequested;
    }
}

Manager::~Manager() {
    assert(_thread->IsCurrent());
}

void Manager::start() {
    if (_videoCapture != nullptr) {
        _videoState = VideoState::active;
    }
    auto weakThis = std::weak_ptr<Manager>(shared_from_this());
    _networkManager.reset(new ThreadLocalObject<NetworkManager>(getNetworkThread(), [encryptionKey = _encryptionKey, enableP2P = _enableP2P, rtcServers = _rtcServers, thread = _thread, weakThis, signalingDataEmitted = _signalingDataEmitted]() {
        return new NetworkManager(
            getNetworkThread(),
            encryptionKey,
            enableP2P,
            rtcServers,
            [thread, weakThis](const NetworkManager::State &state) {
                thread->PostTask(RTC_FROM_HERE, [weakThis, state]() {
                    auto strongThis = weakThis.lock();
                    if (strongThis == nullptr) {
                        return;
                    }
                    TgVoipState mappedState;
                    if (state.isReadyToSendData) {
                        mappedState = TgVoipState::Estabilished;
                        if (!strongThis->_didConnectOnce) {
                            strongThis->_didConnectOnce = true;
                            if (strongThis->_videoState == VideoState::outgoingRequested) {
                                strongThis->_videoState = VideoState::active;
                            }
                        }
                    } else {
                        mappedState = TgVoipState::Reconnecting;
                    }
                    strongThis->_state = mappedState;
                    strongThis->_stateUpdated(mappedState, strongThis->_videoState);

                    strongThis->_mediaManager->perform([state](MediaManager *mediaManager) {
                        mediaManager->setIsConnected(state.isReadyToSendData);
                    });
                });
            },
            [thread, weakThis](const rtc::CopyOnWriteBuffer &packet) {
                thread->PostTask(RTC_FROM_HERE, [weakThis, packet]() {
                    auto strongThis = weakThis.lock();
                    if (strongThis == nullptr) {
                        return;
                    }
                    strongThis->_mediaManager->perform([packet](MediaManager *mediaManager) {
                        mediaManager->receivePacket(packet);
                    });
                });
            },
            [signalingDataEmitted](const std::vector<uint8_t> &data) {
                rtc::CopyOnWriteBuffer buffer;
                uint8_t mode = 3;
                buffer.AppendData(&mode, 1);
                buffer.AppendData(data.data(), data.size());
                std::vector<uint8_t> augmentedData;
                augmentedData.resize(buffer.size());
                memcpy(augmentedData.data(), buffer.data(), buffer.size());
                signalingDataEmitted(augmentedData);
            }
        );
    }));
    bool isOutgoing = _encryptionKey.isOutgoing;
    _mediaManager.reset(new ThreadLocalObject<MediaManager>(getMediaThread(), [isOutgoing, thread = _thread, videoCapture = _videoCapture, weakThis]() {
        return new MediaManager(
            getMediaThread(),
            isOutgoing,
            videoCapture,
            [thread, weakThis](const rtc::CopyOnWriteBuffer &packet) {
                thread->PostTask(RTC_FROM_HERE, [weakThis, packet]() {
                    auto strongThis = weakThis.lock();
                    if (strongThis == nullptr) {
                        return;
                    }
                    strongThis->_networkManager->perform([packet](NetworkManager *networkManager) {
                        networkManager->sendPacket(packet);
                    });
                });
            },
            [thread, weakThis](bool isActive) {
                thread->PostTask(RTC_FROM_HERE, [weakThis, isActive]() {
                    auto strongThis = weakThis.lock();
                    if (strongThis == nullptr) {
                        return;
                    }
                    strongThis->notifyIsLocalVideoActive(isActive);
                });
            }
        );
    }));
}

void Manager::receiveSignalingData(const std::vector<uint8_t> &data) {
    rtc::CopyOnWriteBuffer buffer;
    buffer.AppendData(data.data(), data.size());

    if (buffer.size() < 1) {
        return;
    }

    rtc::ByteBufferReader reader((const char *)buffer.data(), buffer.size());
    uint8_t mode = 0;
    if (!reader.ReadUInt8(&mode)) {
        return;
    }
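
    // Signaling mode byte layout (first byte of every signaling blob):
    //   1 = remote side requests video
    //   2 = remote side accepted our video request
    //   3 = transport signaling (ICE candidates etc.) for the NetworkManager
    //   4 = remote camera active flag, followed by one 0/1 byte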

    if (mode == 1) {
        if (_videoState == VideoState::possible) {
            _videoState = VideoState::incomingRequested;
            _stateUpdated(_state, _videoState);
        }
    } else if (mode == 2) {
        if (_videoState == VideoState::outgoingRequested) {
            _videoState = VideoState::active;
            _stateUpdated(_state, _videoState);

            _mediaManager->perform([videoCapture = _videoCapture](MediaManager *mediaManager) {
                mediaManager->setSendVideo(videoCapture);
            });
        }
    } else if (mode == 3) {
        auto candidatesData = buffer.Slice(1, buffer.size() - 1);
        _networkManager->perform([candidatesData](NetworkManager *networkManager) {
            networkManager->receiveSignalingData(candidatesData);
        });
    } else if (mode == 4) {
        uint8_t value = 0;
        if (reader.ReadUInt8(&value)) {
            _remoteVideoIsActiveUpdated(value != 0);
        }
    }
}

void Manager::requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) {
    if (videoCapture != nullptr) {
        _videoCapture = videoCapture;
        if (_videoState == VideoState::possible) {
            _videoState = VideoState::outgoingRequested;

            rtc::CopyOnWriteBuffer buffer;
            uint8_t mode = 1;
            buffer.AppendData(&mode, 1);

            std::vector<uint8_t> data;
            data.resize(buffer.size());
            memcpy(data.data(), buffer.data(), buffer.size());

            _signalingDataEmitted(data);
            _stateUpdated(_state, _videoState);
        }
    }
}

void Manager::acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) {
    if (videoCapture != nullptr) {
        _videoCapture = videoCapture;
        if (_videoState == VideoState::incomingRequested) {
            _videoState = VideoState::active;

            rtc::CopyOnWriteBuffer buffer;
            uint8_t mode = 2;
            buffer.AppendData(&mode, 1);

            std::vector<uint8_t> data;
            data.resize(buffer.size());
            memcpy(data.data(), buffer.data(), buffer.size());

            _signalingDataEmitted(data);
            _stateUpdated(_state, _videoState);

            _mediaManager->perform([videoCapture](MediaManager *mediaManager) {
                mediaManager->setSendVideo(videoCapture);
            });
        }
    }
}

void Manager::setMuteOutgoingAudio(bool mute) {
    _mediaManager->perform([mute](MediaManager *mediaManager) {
        mediaManager->setMuteOutgoingAudio(mute);
    });
}

void Manager::notifyIsLocalVideoActive(bool isActive) {
    rtc::CopyOnWriteBuffer buffer;
    uint8_t mode = 4;
    buffer.AppendData(&mode, 1);
    uint8_t value = isActive ? 1 : 0;
    buffer.AppendData(&value, 1);

    std::vector<uint8_t> data;
    data.resize(buffer.size());
    memcpy(data.data(), buffer.data(), buffer.size());
    _signalingDataEmitted(data);
}

void Manager::setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _mediaManager->perform([sink](MediaManager *mediaManager) {
        mediaManager->setIncomingVideoOutput(sink);
    });
}

#ifdef TGVOIP_NAMESPACE
}
#endif
@ -1,66 +0,0 @@
#ifndef TGVOIP_WEBRTC_MANAGER_H
#define TGVOIP_WEBRTC_MANAGER_H

#include "ThreadLocalObject.h"
#include "NetworkManager.h"
#include "MediaManager.h"
#include "TgVoip.h"

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class Manager : public std::enable_shared_from_this<Manager> {
public:
    enum class VideoState {
        possible,
        outgoingRequested,
        incomingRequested,
        active
    };

    static rtc::Thread *getMediaThread();

    Manager(
        rtc::Thread *thread,
        TgVoipEncryptionKey encryptionKey,
        bool enableP2P,
        std::vector<TgVoipRtcServer> const &rtcServers,
        std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
        std::function<void (const TgVoipState &, VideoState)> stateUpdated,
        std::function<void (bool)> remoteVideoIsActiveUpdated,
        std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
    );
    ~Manager();

    void start();
    void receiveSignalingData(const std::vector<uint8_t> &data);
    void requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture);
    void acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture);
    void setMuteOutgoingAudio(bool mute);
    void notifyIsLocalVideoActive(bool isActive);
    void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

private:
    rtc::Thread *_thread;
    TgVoipEncryptionKey _encryptionKey;
    bool _enableP2P;
    std::vector<TgVoipRtcServer> _rtcServers;
    std::shared_ptr<TgVoipVideoCaptureInterface> _videoCapture;
    std::function<void (const TgVoipState &, VideoState)> _stateUpdated;
    std::function<void (bool)> _remoteVideoIsActiveUpdated;
    std::function<void (const std::vector<uint8_t> &)> _signalingDataEmitted;
    std::unique_ptr<ThreadLocalObject<NetworkManager>> _networkManager;
    std::unique_ptr<ThreadLocalObject<MediaManager>> _mediaManager;
    TgVoipState _state;
    VideoState _videoState;
    bool _didConnectOnce;
};

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
@ -1,513 +0,0 @@
#include "MediaManager.h"

#include "absl/strings/match.h"
#include "api/audio_codecs/audio_decoder_factory_template.h"
#include "api/audio_codecs/audio_encoder_factory_template.h"
#include "api/audio_codecs/opus/audio_decoder_opus.h"
#include "api/audio_codecs/opus/audio_encoder_opus.h"
#include "api/rtp_parameters.h"
#include "api/task_queue/default_task_queue_factory.h"
#include "media/base/codec.h"
#include "media/base/media_constants.h"
#include "media/engine/webrtc_media_engine.h"
#include "modules/audio_device/include/audio_device_default.h"
#include "rtc_base/task_utils/repeating_task.h"
#include "system_wrappers/include/field_trial.h"
#include "api/video/builtin_video_bitrate_allocator_factory.h"
#include "api/video/video_bitrate_allocation.h"
#include "call/call.h"

#include "api/video_codecs/builtin_video_encoder_factory.h"

#include "TgVoip.h"
#include "VideoCaptureInterfaceImpl.h"

#if TARGET_OS_IPHONE || TARGET_OS_OSX

#include "CodecsApple.h"

#else
#error "Unsupported platform"
#endif

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

static const uint32_t ssrcAudioIncoming = 1;
static const uint32_t ssrcAudioOutgoing = 2;
static const uint32_t ssrcAudioFecIncoming = 5;
static const uint32_t ssrcAudioFecOutgoing = 6;
static const uint32_t ssrcVideoIncoming = 3;
static const uint32_t ssrcVideoOutgoing = 4;
static const uint32_t ssrcVideoFecIncoming = 7;
static const uint32_t ssrcVideoFecOutgoing = 8;

static void AddDefaultFeedbackParams(cricket::VideoCodec *codec) {
    // Don't add any feedback params for RED and ULPFEC.
    if (codec->name == cricket::kRedCodecName || codec->name == cricket::kUlpfecCodecName)
        return;
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty));
    codec->AddFeedbackParam(
        cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty));
    // Don't add any more feedback params for FLEXFEC.
    if (codec->name == cricket::kFlexfecCodecName)
        return;
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir));
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kParamValueEmpty));
    codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kRtcpFbNackParamPli));
    if (codec->name == cricket::kVp8CodecName &&
        webrtc::field_trial::IsEnabled("WebRTC-RtcpLossNotification")) {
        codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamLntf, cricket::kParamValueEmpty));
    }
}

static std::vector<cricket::VideoCodec> AssignPayloadTypesAndDefaultCodecs(std::vector<webrtc::SdpVideoFormat> input_formats) {
    if (input_formats.empty())
        return std::vector<cricket::VideoCodec>();
    static const int kFirstDynamicPayloadType = 96;
    static const int kLastDynamicPayloadType = 127;
    int payload_type = kFirstDynamicPayloadType;

    input_formats.push_back(webrtc::SdpVideoFormat(cricket::kRedCodecName));
    input_formats.push_back(webrtc::SdpVideoFormat(cricket::kUlpfecCodecName));

    if (true) {
        webrtc::SdpVideoFormat flexfec_format(cricket::kFlexfecCodecName);
        // This value is currently arbitrarily set to 10 seconds. (The unit
        // is microseconds.) This parameter MUST be present in the SDP, but
        // we never actually use the value anywhere in our code.
        // TODO(brandtr): Consider honouring this value in the sender and receiver.
        flexfec_format.parameters = {{cricket::kFlexfecFmtpRepairWindow, "10000000"}};
        input_formats.push_back(flexfec_format);
    }

    std::vector<cricket::VideoCodec> output_codecs;
    for (const webrtc::SdpVideoFormat& format : input_formats) {
        cricket::VideoCodec codec(format);
        codec.id = payload_type;
        AddDefaultFeedbackParams(&codec);
        output_codecs.push_back(codec);

        // Increment payload type.
        ++payload_type;
        if (payload_type > kLastDynamicPayloadType) {
            RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest.";
            break;
        }

        // Add associated RTX codec for non-FEC codecs.
        if (!absl::EqualsIgnoreCase(codec.name, cricket::kUlpfecCodecName) &&
            !absl::EqualsIgnoreCase(codec.name, cricket::kFlexfecCodecName)) {
            output_codecs.push_back(
                cricket::VideoCodec::CreateRtxCodec(payload_type, codec.id));

            // Increment payload type.
            ++payload_type;
            if (payload_type > kLastDynamicPayloadType) {
                RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest.";
                break;
            }
        }
    }
    return output_codecs;
}
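
// Payload types are assigned sequentially from the dynamic range 96-127,
// interleaving each codec with its associated RTX retransmission codec;
// RED, ULPFEC and FlexFEC formats are appended so FEC can be negotiated.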

static int sendCodecPriority(const cricket::VideoCodec &codec) {
    int priority = 0;
    if (codec.name == cricket::kAv1CodecName) {
        return priority;
    }
    priority++;
    if (codec.name == cricket::kH265CodecName) {
        if (supportsH265Encoding()) {
            return priority;
        }
    }
    priority++;
    if (codec.name == cricket::kH264CodecName) {
        return priority;
    }
    priority++;
    if (codec.name == cricket::kVp9CodecName) {
        return priority;
    }
    priority++;
    if (codec.name == cricket::kVp8CodecName) {
        return priority;
    }
    priority++;
    return -1;
}
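
// Lower return value means higher send priority: AV1 first, then H265 (only
// when hardware encoding is available), H264, VP9 and VP8; -1 excludes a
// codec from selection entirely.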

static absl::optional<cricket::VideoCodec> selectVideoCodec(std::vector<cricket::VideoCodec> &codecs) {
    std::vector<cricket::VideoCodec> sortedCodecs;
    for (auto &codec : codecs) {
        if (sendCodecPriority(codec) != -1) {
            sortedCodecs.push_back(codec);
        }
    }

    std::sort(sortedCodecs.begin(), sortedCodecs.end(), [](const cricket::VideoCodec &lhs, const cricket::VideoCodec &rhs) {
        return sendCodecPriority(lhs) < sendCodecPriority(rhs);
    });

    if (sortedCodecs.size() != 0) {
        return sortedCodecs[0];
    } else {
        return absl::nullopt;
    }
}

static rtc::Thread *makeWorkerThread() {
    static std::unique_ptr<rtc::Thread> value = rtc::Thread::Create();
    value->SetName("WebRTC-Worker", nullptr);
    value->Start();
    return value.get();
}

rtc::Thread *MediaManager::getWorkerThread() {
    static rtc::Thread *value = makeWorkerThread();
    return value;
}

MediaManager::MediaManager(
    rtc::Thread *thread,
    bool isOutgoing,
    std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
    std::function<void (const rtc::CopyOnWriteBuffer &)> packetEmitted,
    std::function<void (bool)> localVideoCaptureActiveUpdated
) :
_packetEmitted(packetEmitted),
_localVideoCaptureActiveUpdated(localVideoCaptureActiveUpdated),
_thread(thread),
_eventLog(std::make_unique<webrtc::RtcEventLogNull>()),
_taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()),
_videoCapture(videoCapture) {
    _ssrcAudio.incoming = isOutgoing ? ssrcAudioIncoming : ssrcAudioOutgoing;
    _ssrcAudio.outgoing = (!isOutgoing) ? ssrcAudioIncoming : ssrcAudioOutgoing;
    _ssrcAudio.fecIncoming = isOutgoing ? ssrcAudioFecIncoming : ssrcAudioFecOutgoing;
    _ssrcAudio.fecOutgoing = (!isOutgoing) ? ssrcAudioFecIncoming : ssrcAudioFecOutgoing;
    _ssrcVideo.incoming = isOutgoing ? ssrcVideoIncoming : ssrcVideoOutgoing;
    _ssrcVideo.outgoing = (!isOutgoing) ? ssrcVideoIncoming : ssrcVideoOutgoing;
    _ssrcVideo.fecIncoming = isOutgoing ? ssrcVideoFecIncoming : ssrcVideoFecOutgoing;
    _ssrcVideo.fecOutgoing = (!isOutgoing) ? ssrcVideoFecIncoming : ssrcVideoFecOutgoing;
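
    // The SSRCs are fixed constants agreed upon by both peers rather than
    // negotiated via SDP: the caller sends on {2, 6, 4, 8} and receives on
    // {1, 5, 3, 7}, and the callee uses the mirrored assignment, so each
    // side's outgoing SSRC matches the other side's incoming SSRC.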

    _enableFlexfec = true;

    _isConnected = false;
    _muteOutgoingAudio = false;

    auto videoEncoderFactory = makeVideoEncoderFactory();
    _videoCodecs = AssignPayloadTypesAndDefaultCodecs(videoEncoderFactory->GetSupportedFormats());

    _isSendingVideo = false;

    _audioNetworkInterface = std::unique_ptr<MediaManager::NetworkInterfaceImpl>(new MediaManager::NetworkInterfaceImpl(this, false));
    _videoNetworkInterface = std::unique_ptr<MediaManager::NetworkInterfaceImpl>(new MediaManager::NetworkInterfaceImpl(this, true));

    webrtc::field_trial::InitFieldTrialsFromString(
        "WebRTC-Audio-SendSideBwe/Enabled/"
        "WebRTC-Audio-Allocation/min:6kbps,max:32kbps/"
        "WebRTC-Audio-OpusMinPacketLossRate/Enabled-1/"
        "WebRTC-FlexFEC-03/Enabled/"
        "WebRTC-FlexFEC-03-Advertised/Enabled/"
    );
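
    // The field trials pin the audio path to send-side bandwidth estimation
    // with a 6-32 kbps Opus allocation and enable (and advertise) FlexFEC-03
    // for the video stream.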

    configurePlatformAudio();

    _videoBitrateAllocatorFactory = webrtc::CreateBuiltinVideoBitrateAllocatorFactory();

    cricket::MediaEngineDependencies mediaDeps;
    mediaDeps.task_queue_factory = _taskQueueFactory.get();
    mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
    mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();

    mediaDeps.video_encoder_factory = makeVideoEncoderFactory();
    mediaDeps.video_decoder_factory = makeVideoDecoderFactory();

    mediaDeps.audio_processing = webrtc::AudioProcessingBuilder().Create();
    _mediaEngine = cricket::CreateMediaEngine(std::move(mediaDeps));
    _mediaEngine->Init();
    webrtc::Call::Config callConfig(_eventLog.get());
    callConfig.task_queue_factory = _taskQueueFactory.get();
    callConfig.trials = &_fieldTrials;
    callConfig.audio_state = _mediaEngine->voice().GetAudioState();
    _call.reset(webrtc::Call::Create(callConfig));
    _audioChannel.reset(_mediaEngine->voice().CreateMediaChannel(_call.get(), cricket::MediaConfig(), cricket::AudioOptions(), webrtc::CryptoOptions::NoGcm()));
    _videoChannel.reset(_mediaEngine->video().CreateMediaChannel(_call.get(), cricket::MediaConfig(), cricket::VideoOptions(), webrtc::CryptoOptions::NoGcm(), _videoBitrateAllocatorFactory.get()));

    _audioChannel->AddSendStream(cricket::StreamParams::CreateLegacy(_ssrcAudio.outgoing));

    const uint32_t opusClockrate = 48000;
    const uint16_t opusSdpPayload = 111;
    const char *opusSdpName = "opus";
    const uint8_t opusSdpChannels = 2;
    const uint32_t opusSdpBitrate = 0;

    const uint8_t opusMinBitrateKbps = 6;
    const uint8_t opusMaxBitrateKbps = 32;
    const uint8_t opusStartBitrateKbps = 8;
    const uint8_t opusPTimeMs = 120;

    cricket::AudioCodec opusCodec(opusSdpPayload, opusSdpName, opusClockrate, opusSdpBitrate, opusSdpChannels);
    opusCodec.AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc));
    opusCodec.SetParam(cricket::kCodecParamMinBitrate, opusMinBitrateKbps);
    opusCodec.SetParam(cricket::kCodecParamStartBitrate, opusStartBitrateKbps);
    opusCodec.SetParam(cricket::kCodecParamMaxBitrate, opusMaxBitrateKbps);
    opusCodec.SetParam(cricket::kCodecParamUseInbandFec, 1);
    opusCodec.SetParam(cricket::kCodecParamPTime, opusPTimeMs);

    cricket::AudioSendParameters audioSendParameters;
    audioSendParameters.codecs.push_back(opusCodec);
    audioSendParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
    audioSendParameters.options.echo_cancellation = false;
    //audioSendParameters.options.experimental_ns = false;
    audioSendParameters.options.noise_suppression = false;
    audioSendParameters.options.auto_gain_control = false;
    audioSendParameters.options.highpass_filter = false;
    audioSendParameters.options.typing_detection = false;
    //audioSendParameters.max_bandwidth_bps = 16000;
    audioSendParameters.rtcp.reduced_size = true;
    audioSendParameters.rtcp.remote_estimate = true;
    _audioChannel->SetSendParameters(audioSendParameters);
    _audioChannel->SetInterface(_audioNetworkInterface.get(), webrtc::MediaTransportConfig());

    cricket::AudioRecvParameters audioRecvParameters;
    audioRecvParameters.codecs.emplace_back(opusSdpPayload, opusSdpName, opusClockrate, opusSdpBitrate, opusSdpChannels);
    audioRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
    audioRecvParameters.rtcp.reduced_size = true;
    audioRecvParameters.rtcp.remote_estimate = true;

    _audioChannel->SetRecvParameters(audioRecvParameters);
    _audioChannel->AddRecvStream(cricket::StreamParams::CreateLegacy(_ssrcAudio.incoming));
    _audioChannel->SetPlayout(true);

    _videoChannel->SetInterface(_videoNetworkInterface.get(), webrtc::MediaTransportConfig());

    if (_videoCapture != nullptr) {
        setSendVideo(_videoCapture);
    }
}

MediaManager::~MediaManager() {
    assert(_thread->IsCurrent());

    _call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkDown);
    _call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkDown);

    _audioChannel->OnReadyToSend(false);
    _audioChannel->SetSend(false);
    _audioChannel->SetAudioSend(_ssrcAudio.outgoing, false, nullptr, &_audioSource);

    _audioChannel->SetPlayout(false);

    _audioChannel->RemoveRecvStream(_ssrcAudio.incoming);
    _audioChannel->RemoveSendStream(_ssrcAudio.outgoing);

    _audioChannel->SetInterface(nullptr, webrtc::MediaTransportConfig());

    setSendVideo(nullptr);
}

void MediaManager::setIsConnected(bool isConnected) {
    if (_isConnected == isConnected) {
        return;
    }
    _isConnected = isConnected;

    if (_isConnected) {
        _call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkUp);
        _call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkUp);
    } else {
        _call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkDown);
        _call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkDown);
    }
    if (_audioChannel) {
        _audioChannel->OnReadyToSend(_isConnected);
        _audioChannel->SetSend(_isConnected);
        _audioChannel->SetAudioSend(_ssrcAudio.outgoing, _isConnected && !_muteOutgoingAudio, nullptr, &_audioSource);
    }
    if (_isSendingVideo && _videoChannel) {
        _videoChannel->OnReadyToSend(_isConnected);
        _videoChannel->SetSend(_isConnected);
    }
}

void MediaManager::receivePacket(const rtc::CopyOnWriteBuffer &packet) {
    if (packet.size() < 1) {
        return;
    }

    uint8_t header = ((uint8_t *)packet.data())[0];
    rtc::CopyOnWriteBuffer unwrappedPacket = packet.Slice(1, packet.size() - 1);
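
    // The first byte is a one-byte mux marker, presumably prepended by the
    // matching network interface on the sending side (not shown in this hunk):
    // 0xba tags audio packets and 0xbf tags video packets; it is stripped
    // before the payload is handed to the corresponding media channel.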

    if (header == 0xba) {
        if (_audioChannel) {
            _audioChannel->OnPacketReceived(unwrappedPacket, -1);
        }
    } else if (header == 0xbf) {
        if (_videoChannel) {
            _videoChannel->OnPacketReceived(unwrappedPacket, -1);
        }
    }
}

void MediaManager::notifyPacketSent(const rtc::SentPacket &sentPacket) {
    _call->OnSentPacket(sentPacket);
}

void MediaManager::setSendVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) {
    if (_isSendingVideo == (videoCapture != nullptr)) {
        return;
    }
    _isSendingVideo = videoCapture != nullptr;
    _videoCapture = videoCapture;

    if (_videoCapture != nullptr) {
        ((TgVoipVideoCaptureInterfaceImpl *)_videoCapture.get())->_impl->getSyncAssumingSameThread()->setIsActiveUpdated(this->_localVideoCaptureActiveUpdated);
    }

    if (_isSendingVideo) {
        auto videoCodec = selectVideoCodec(_videoCodecs);
        if (videoCodec.has_value()) {
            auto codec = videoCodec.value();

            codec.SetParam(cricket::kCodecParamMinBitrate, 64);
            codec.SetParam(cricket::kCodecParamStartBitrate, 512);
            codec.SetParam(cricket::kCodecParamMaxBitrate, 2500);

            cricket::VideoSendParameters videoSendParameters;
            videoSendParameters.codecs.push_back(codec);

            if (_enableFlexfec) {
                for (auto &c : _videoCodecs) {
                    if (c.name == cricket::kFlexfecCodecName) {
                        videoSendParameters.codecs.push_back(c);
                        break;
                    }
                }
            }

            videoSendParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
            //send_parameters.max_bandwidth_bps = 800000;
            //send_parameters.rtcp.reduced_size = true;
            //videoSendParameters.rtcp.remote_estimate = true;
            _videoChannel->SetSendParameters(videoSendParameters);

            if (_enableFlexfec) {
                cricket::StreamParams videoSendStreamParams;
                cricket::SsrcGroup videoSendSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.outgoing, _ssrcVideo.fecOutgoing});
                videoSendStreamParams.ssrcs = {_ssrcVideo.outgoing};
                videoSendStreamParams.ssrc_groups.push_back(videoSendSsrcGroup);
                videoSendStreamParams.cname = "cname";
                _videoChannel->AddSendStream(videoSendStreamParams);

                if (_videoCapture != nullptr) {
                    _videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, ((TgVoipVideoCaptureInterfaceImpl *)_videoCapture.get())->_impl->getSyncAssumingSameThread()->_videoSource.get());
                }
                _videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);
            } else {
                _videoChannel->AddSendStream(cricket::StreamParams::CreateLegacy(_ssrcVideo.outgoing));
                if (_videoCapture != nullptr) {
                    _videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, ((TgVoipVideoCaptureInterfaceImpl *)_videoCapture.get())->_impl->getSyncAssumingSameThread()->_videoSource);
                }
            }

            cricket::VideoRecvParameters videoRecvParameters;

            for (auto &c : _videoCodecs) {
                if (c.name == cricket::kFlexfecCodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kH264CodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kH265CodecName) {
                    videoRecvParameters.codecs.push_back(c);
|
|
||||||
} else if (c.name == cricket::kVp8CodecName) {
|
|
||||||
videoRecvParameters.codecs.push_back(c);
|
|
||||||
} else if (c.name == cricket::kVp9CodecName) {
|
|
||||||
videoRecvParameters.codecs.push_back(c);
|
|
||||||
} else if (c.name == cricket::kAv1CodecName) {
|
|
||||||
videoRecvParameters.codecs.push_back(c);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
videoRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
|
|
||||||
//recv_parameters.rtcp.reduced_size = true;
|
|
||||||
videoRecvParameters.rtcp.remote_estimate = true;
|
|
||||||
|
|
||||||
cricket::StreamParams videoRecvStreamParams;
|
|
||||||
cricket::SsrcGroup videoRecvSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.incoming, _ssrcVideo.fecIncoming});
|
|
||||||
videoRecvStreamParams.ssrcs = {_ssrcVideo.incoming};
|
|
||||||
videoRecvStreamParams.ssrc_groups.push_back(videoRecvSsrcGroup);
|
|
||||||
videoRecvStreamParams.cname = "cname";
|
|
||||||
|
|
||||||
_videoChannel->AddRecvStream(videoRecvStreamParams);
|
|
||||||
_videoChannel->SetRecvParameters(videoRecvParameters);
|
|
||||||
|
|
||||||
if (_isSendingVideo && _videoChannel) {
|
|
||||||
_videoChannel->OnReadyToSend(_isConnected);
|
|
||||||
_videoChannel->SetSend(_isConnected);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, nullptr);
|
|
||||||
_videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);
|
|
||||||
|
|
||||||
_videoChannel->RemoveRecvStream(_ssrcVideo.incoming);
|
|
||||||
_videoChannel->RemoveRecvStream(_ssrcVideo.fecIncoming);
|
|
||||||
_videoChannel->RemoveSendStream(_ssrcVideo.outgoing);
|
|
||||||
if (_enableFlexfec) {
|
|
||||||
_videoChannel->RemoveSendStream(_ssrcVideo.fecOutgoing);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
void MediaManager::setMuteOutgoingAudio(bool mute) {
|
|
||||||
_muteOutgoingAudio = mute;
|
|
||||||
|
|
||||||
_audioChannel->SetAudioSend(_ssrcAudio.outgoing, _isConnected && !_muteOutgoingAudio, nullptr, &_audioSource);
|
|
||||||
}
|
|
||||||
|
|
||||||
void MediaManager::setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
|
|
||||||
_currentIncomingVideoSink = sink;
|
|
||||||
_videoChannel->SetSink(_ssrcVideo.incoming, _currentIncomingVideoSink.get());
|
|
||||||
}
|
|
||||||
|
|
||||||
MediaManager::NetworkInterfaceImpl::NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo) :
|
|
||||||
_mediaManager(mediaManager),
|
|
||||||
_isVideo(isVideo) {
|
|
||||||
}
|
|
||||||
|
|
||||||
bool MediaManager::NetworkInterfaceImpl::SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) {
|
|
||||||
rtc::CopyOnWriteBuffer wrappedPacket;
|
|
||||||
uint8_t header = _isVideo ? 0xbf : 0xba;
|
|
||||||
wrappedPacket.AppendData(&header, 1);
|
|
||||||
wrappedPacket.AppendData(*packet);
|
|
||||||
|
|
||||||
_mediaManager->_packetEmitted(wrappedPacket);
|
|
||||||
rtc::SentPacket sentPacket(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent);
|
|
||||||
_mediaManager->notifyPacketSent(sentPacket);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
bool MediaManager::NetworkInterfaceImpl::SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) {
|
|
||||||
rtc::CopyOnWriteBuffer wrappedPacket;
|
|
||||||
uint8_t header = _isVideo ? 0xbf : 0xba;
|
|
||||||
wrappedPacket.AppendData(&header, 1);
|
|
||||||
wrappedPacket.AppendData(*packet);
|
|
||||||
|
|
||||||
_mediaManager->_packetEmitted(wrappedPacket);
|
|
||||||
rtc::SentPacket sentPacket(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent);
|
|
||||||
_mediaManager->notifyPacketSent(sentPacket);
|
|
||||||
return true;
|
|
||||||
}
|
|
||||||
|
|
||||||
int MediaManager::NetworkInterfaceImpl::SetOption(cricket::MediaChannel::NetworkInterface::SocketType, rtc::Socket::Option, int) {
|
|
||||||
return -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
#ifdef TGVOIP_NAMESPACE
|
|
||||||
}
|
|
||||||
#endif
|
|
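The 0xba/0xbf byte written in SendPacket()/SendRtcp() above is the only framing MediaManager adds: audio and video share the single encrypted transport, and receivePacket() routes on that first byte. A minimal, dependency-free sketch of the same demultiplexing; onAudioPacket/onVideoPacket are hypothetical stand-ins for the media channels' OnPacketReceived():

#include <cstdint>
#include <cstdio>
#include <vector>

// Tag bytes MediaManager uses to multiplex both media channels over the
// one encrypted NetworkManager transport.
constexpr uint8_t kAudioTag = 0xba;
constexpr uint8_t kVideoTag = 0xbf;

// Hypothetical handlers standing in for _audioChannel/_videoChannel.
void onAudioPacket(const uint8_t *data, size_t size) { std::printf("audio: %zu bytes\n", size); }
void onVideoPacket(const uint8_t *data, size_t size) { std::printf("video: %zu bytes\n", size); }

// Mirrors MediaManager::receivePacket(): read the tag byte, route the rest.
void demux(const std::vector<uint8_t> &packet) {
    if (packet.size() < 1) {
        return; // too short to carry a tag byte
    }
    const uint8_t *payload = packet.data() + 1;
    size_t payloadSize = packet.size() - 1;
    switch (packet[0]) {
        case kAudioTag: onAudioPacket(payload, payloadSize); break;
        case kVideoTag: onVideoPacket(payload, payloadSize); break;
        default: break; // unknown tag: dropped, as in the original
    }
}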
@ -1,114 +0,0 @@
#ifndef TGVOIP_WEBRTC_MEDIA_MANAGER_H
#define TGVOIP_WEBRTC_MEDIA_MANAGER_H

#include "rtc_base/thread.h"
#include "rtc_base/copy_on_write_buffer.h"
#include "rtc_base/third_party/sigslot/sigslot.h"
#include "api/transport/field_trial_based_config.h"
#include "pc/rtp_sender.h"

#include "TgVoip.h"

#include <functional>
#include <memory>

namespace webrtc {
class Call;
class RtcEventLogNull;
class TaskQueueFactory;
class VideoBitrateAllocatorFactory;
class VideoTrackSourceInterface;
};

namespace cricket {
class MediaEngineInterface;
class VoiceMediaChannel;
class VideoMediaChannel;
};

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class VideoCapturerInterface;

class MediaManager : public sigslot::has_slots<>, public std::enable_shared_from_this<MediaManager> {
private:
    struct SSRC {
        uint32_t incoming;
        uint32_t outgoing;
        uint32_t fecIncoming;
        uint32_t fecOutgoing;
    };

    class NetworkInterfaceImpl : public cricket::MediaChannel::NetworkInterface {
    public:
        NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo);
        bool SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override;
        bool SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override;
        int SetOption(SocketType type, rtc::Socket::Option opt, int option) override;

    private:
        MediaManager *_mediaManager;
        bool _isVideo;
    };

    friend class MediaManager::NetworkInterfaceImpl;

public:
    static rtc::Thread *getWorkerThread();

    MediaManager(
        rtc::Thread *thread,
        bool isOutgoing,
        std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
        std::function<void (const rtc::CopyOnWriteBuffer &)> packetEmitted,
        std::function<void (bool)> localVideoCaptureActiveUpdated
    );
    ~MediaManager();

    void setIsConnected(bool isConnected);
    void receivePacket(const rtc::CopyOnWriteBuffer &packet);
    void notifyPacketSent(const rtc::SentPacket &sentPacket);
    void setSendVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture);
    void setMuteOutgoingAudio(bool mute);
    void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

protected:
    std::function<void (const rtc::CopyOnWriteBuffer &)> _packetEmitted;
    std::function<void (bool)> _localVideoCaptureActiveUpdated;

private:
    rtc::Thread *_thread;
    std::unique_ptr<webrtc::RtcEventLogNull> _eventLog;
    std::unique_ptr<webrtc::TaskQueueFactory> _taskQueueFactory;

    SSRC _ssrcAudio;
    SSRC _ssrcVideo;
    bool _enableFlexfec;

    bool _isConnected;
    bool _muteOutgoingAudio;

    std::vector<cricket::VideoCodec> _videoCodecs;
    bool _isSendingVideo;

    std::unique_ptr<cricket::MediaEngineInterface> _mediaEngine;
    std::unique_ptr<webrtc::Call> _call;
    webrtc::FieldTrialBasedConfig _fieldTrials;
    webrtc::LocalAudioSinkAdapter _audioSource;
    std::unique_ptr<cricket::VoiceMediaChannel> _audioChannel;
    std::unique_ptr<cricket::VideoMediaChannel> _videoChannel;
    std::unique_ptr<webrtc::VideoBitrateAllocatorFactory> _videoBitrateAllocatorFactory;
    std::shared_ptr<TgVoipVideoCaptureInterface> _videoCapture;
    std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _currentIncomingVideoSink;

    std::unique_ptr<MediaManager::NetworkInterfaceImpl> _audioNetworkInterface;
    std::unique_ptr<MediaManager::NetworkInterfaceImpl> _videoNetworkInterface;
};

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
@ -1,353 +0,0 @@
#include "NetworkManager.h"

#include "p2p/base/basic_packet_socket_factory.h"
#include "p2p/client/basic_port_allocator.h"
#include "p2p/base/p2p_transport_channel.h"
#include "p2p/base/basic_async_resolver_factory.h"
#include "api/packet_socket_factory.h"
#include "rtc_base/task_utils/to_queued_task.h"
#include "p2p/base/ice_credentials_iterator.h"
#include "api/jsep_ice_candidate.h"

extern "C" {
#include <openssl/sha.h>
#include <openssl/aes.h>
#include <openssl/modes.h>
#include <openssl/rand.h>
#include <openssl/crypto.h>
}

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

static void KDF2(unsigned char *encryptionKey, unsigned char *msgKey, size_t x, unsigned char *aesKey, unsigned char *aesIv) {
    uint8_t sA[32], sB[32];
    uint8_t buf[16 + 36];
    memcpy(buf, msgKey, 16);
    memcpy(buf + 16, encryptionKey + x, 36);
    SHA256(buf, 16 + 36, sA);
    memcpy(buf, encryptionKey + 40 + x, 36);
    memcpy(buf + 36, msgKey, 16);
    SHA256(buf, 36 + 16, sB);
    memcpy(aesKey, sA, 8);
    memcpy(aesKey + 8, sB + 8, 16);
    memcpy(aesKey + 8 + 16, sA + 24, 8);
    memcpy(aesIv, sB, 8);
    memcpy(aesIv + 8, sA + 8, 16);
    memcpy(aesIv + 8 + 16, sB + 24, 8);
}

static void aesIgeEncrypt(uint8_t *in, uint8_t *out, size_t length, uint8_t *key, uint8_t *iv) {
    AES_KEY akey;
    AES_set_encrypt_key(key, 32*8, &akey);
    AES_ige_encrypt(in, out, length, &akey, iv, AES_ENCRYPT);
}

static void aesIgeDecrypt(uint8_t *in, uint8_t *out, size_t length, uint8_t *key, uint8_t *iv) {
    AES_KEY akey;
    AES_set_decrypt_key(key, 32*8, &akey);
    AES_ige_encrypt(in, out, length, &akey, iv, AES_DECRYPT);
}

static absl::optional<rtc::CopyOnWriteBuffer> decryptPacket(const rtc::CopyOnWriteBuffer &packet, const TgVoipEncryptionKey &encryptionKey) {
    if (packet.size() < 16 + 16) {
        return absl::nullopt;
    }
    unsigned char msgKey[16];
    memcpy(msgKey, packet.data(), 16);

    int x = encryptionKey.isOutgoing ? 8 : 0;

    unsigned char aesKey[32];
    unsigned char aesIv[32];
    KDF2((unsigned char *)encryptionKey.value.data(), msgKey, x, aesKey, aesIv);
    size_t decryptedSize = packet.size() - 16;
    if (decryptedSize > 128 * 1024) { // size_t is unsigned, so only the upper bound is meaningful
        return absl::nullopt;
    }
    if (decryptedSize % 16 != 0) {
        return absl::nullopt;
    }
    rtc::Buffer decryptionBuffer(decryptedSize);
    aesIgeDecrypt(((uint8_t *)packet.data()) + 16, decryptionBuffer.begin(), decryptionBuffer.size(), aesKey, aesIv);

    rtc::ByteBufferWriter msgKeyData;
    msgKeyData.WriteBytes((const char *)encryptionKey.value.data() + 88 + x, 32);
    msgKeyData.WriteBytes((const char *)decryptionBuffer.data(), decryptionBuffer.size());
    unsigned char msgKeyLarge[32];
    SHA256((uint8_t *)msgKeyData.Data(), msgKeyData.Length(), msgKeyLarge);

    uint16_t innerSize;
    memcpy(&innerSize, decryptionBuffer.data(), 2);

    unsigned char checkMsgKey[16];
    memcpy(checkMsgKey, msgKeyLarge + 8, 16);

    if (memcmp(checkMsgKey, msgKey, 16) != 0) {
        return absl::nullopt;
    }

    if (innerSize > decryptionBuffer.size() - 2) { // uint16_t can't be negative
        return absl::nullopt;
    }

    rtc::CopyOnWriteBuffer decryptedPacket;
    decryptedPacket.AppendData((const char *)decryptionBuffer.data() + 2, innerSize);
    return decryptedPacket;
}

static absl::optional<rtc::Buffer> encryptPacket(const rtc::CopyOnWriteBuffer &packet, const TgVoipEncryptionKey &encryptionKey) {
    if (packet.size() > UINT16_MAX) {
        return absl::nullopt;
    }

    rtc::ByteBufferWriter innerData;
    uint16_t packetSize = (uint16_t)packet.size();
    innerData.WriteBytes((const char *)&packetSize, 2);
    innerData.WriteBytes((const char *)packet.data(), packet.size());

    size_t innerPadding = 16 - innerData.Length() % 16;
    uint8_t paddingData[16];
    RAND_bytes(paddingData, (int)innerPadding);
    innerData.WriteBytes((const char *)paddingData, innerPadding);

    if (innerData.Length() % 16 != 0) {
        assert(false);
        return absl::nullopt;
    }

    int x = encryptionKey.isOutgoing ? 0 : 8;

    rtc::ByteBufferWriter msgKeyData;
    msgKeyData.WriteBytes((const char *)encryptionKey.value.data() + 88 + x, 32);
    msgKeyData.WriteBytes(innerData.Data(), innerData.Length());
    unsigned char msgKeyLarge[32];
    SHA256((uint8_t *)msgKeyData.Data(), msgKeyData.Length(), msgKeyLarge);

    unsigned char msgKey[16];
    memcpy(msgKey, msgKeyLarge + 8, 16);

    unsigned char aesKey[32];
    unsigned char aesIv[32];
    KDF2((unsigned char *)encryptionKey.value.data(), msgKey, x, aesKey, aesIv);

    rtc::Buffer encryptedPacket;
    encryptedPacket.AppendData((const char *)msgKey, 16);

    rtc::Buffer encryptionBuffer(innerData.Length());
    aesIgeEncrypt((uint8_t *)innerData.Data(), encryptionBuffer.begin(), innerData.Length(), aesKey, aesIv);

    encryptedPacket.AppendData(encryptionBuffer.begin(), encryptionBuffer.size());

    /*rtc::CopyOnWriteBuffer testBuffer;
    testBuffer.AppendData(encryptedPacket.data(), encryptedPacket.size());
    TgVoipEncryptionKey testKey;
    testKey.value = encryptionKey.value;
    testKey.isOutgoing = !encryptionKey.isOutgoing;
    decryptPacket(testBuffer, testKey);*/

    return encryptedPacket;
}

NetworkManager::NetworkManager(
    rtc::Thread *thread,
    TgVoipEncryptionKey encryptionKey,
    bool enableP2P,
    std::vector<TgVoipRtcServer> const &rtcServers,
    std::function<void (const NetworkManager::State &)> stateUpdated,
    std::function<void (const rtc::CopyOnWriteBuffer &)> packetReceived,
    std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
) :
_thread(thread),
_encryptionKey(encryptionKey),
_stateUpdated(stateUpdated),
_packetReceived(packetReceived),
_signalingDataEmitted(signalingDataEmitted) {
    assert(_thread->IsCurrent());

    _socketFactory.reset(new rtc::BasicPacketSocketFactory(_thread));

    _networkManager = std::make_unique<rtc::BasicNetworkManager>();
    _portAllocator.reset(new cricket::BasicPortAllocator(_networkManager.get(), _socketFactory.get(), nullptr, nullptr));

    uint32_t flags = cricket::PORTALLOCATOR_DISABLE_TCP;
    if (!enableP2P) {
        flags |= cricket::PORTALLOCATOR_DISABLE_UDP;
        flags |= cricket::PORTALLOCATOR_DISABLE_STUN;
    }
    _portAllocator->set_flags(_portAllocator->flags() | flags);
    _portAllocator->Initialize();

    cricket::ServerAddresses stunServers;
    std::vector<cricket::RelayServerConfig> turnServers;

    if (rtcServers.size() == 0) {
        rtc::SocketAddress defaultStunAddress = rtc::SocketAddress("134.122.52.178", 3478);
        stunServers.insert(defaultStunAddress);

        turnServers.push_back(cricket::RelayServerConfig(
            rtc::SocketAddress("134.122.52.178", 3478),
            "openrelay",
            "openrelay",
            cricket::PROTO_UDP
        ));
    } else {
        for (auto &server : rtcServers) {
            if (server.isTurn) {
                turnServers.push_back(cricket::RelayServerConfig(
                    rtc::SocketAddress(server.host, server.port),
                    server.login,
                    server.password,
                    cricket::PROTO_UDP
                ));
            } else {
                rtc::SocketAddress stunAddress = rtc::SocketAddress(server.host, server.port);
                stunServers.insert(stunAddress);
            }
        }
    }

    _portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE);

    _asyncResolverFactory = std::make_unique<webrtc::BasicAsyncResolverFactory>();
    _transportChannel.reset(new cricket::P2PTransportChannel("transport", 0, _portAllocator.get(), _asyncResolverFactory.get(), nullptr));

    cricket::IceConfig iceConfig;
    iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY;
    _transportChannel->SetIceConfig(iceConfig);

    cricket::IceParameters localIceParameters(
        "gcp3",
        "zWDKozH8/3JWt8he3M/CMj5R",
        false
    );
    cricket::IceParameters remoteIceParameters(
        "acp3",
        "aWDKozH8/3JWt8he3M/CMj5R",
        false
    );

    _transportChannel->SetIceParameters(_encryptionKey.isOutgoing ? localIceParameters : remoteIceParameters);
    _transportChannel->SetIceRole(_encryptionKey.isOutgoing ? cricket::ICEROLE_CONTROLLING : cricket::ICEROLE_CONTROLLED);

    _transportChannel->SignalCandidateGathered.connect(this, &NetworkManager::candidateGathered);
    _transportChannel->SignalGatheringState.connect(this, &NetworkManager::candidateGatheringState);
    _transportChannel->SignalIceTransportStateChanged.connect(this, &NetworkManager::transportStateChanged);
    _transportChannel->SignalReadPacket.connect(this, &NetworkManager::transportPacketReceived);

    _transportChannel->MaybeStartGathering();

    _transportChannel->SetRemoteIceMode(cricket::ICEMODE_FULL);
    _transportChannel->SetRemoteIceParameters((!_encryptionKey.isOutgoing) ? localIceParameters : remoteIceParameters);
}

NetworkManager::~NetworkManager() {
    assert(_thread->IsCurrent());

    _transportChannel.reset();
    _asyncResolverFactory.reset();
    _portAllocator.reset();
    _networkManager.reset();
    _socketFactory.reset();
}

void NetworkManager::receiveSignalingData(const rtc::CopyOnWriteBuffer &data) {
    rtc::ByteBufferReader reader((const char *)data.data(), data.size());
    uint32_t candidateCount = 0;
    if (!reader.ReadUInt32(&candidateCount)) {
        return;
    }
    std::vector<std::string> candidates;
    for (uint32_t i = 0; i < candidateCount; i++) {
        uint32_t candidateLength = 0;
        if (!reader.ReadUInt32(&candidateLength)) {
            return;
        }
        std::string candidate;
        if (!reader.ReadString(&candidate, candidateLength)) {
            return;
        }
        candidates.push_back(candidate);
    }

    for (auto &serializedCandidate : candidates) {
        webrtc::JsepIceCandidate parseCandidate("", 0);
        if (parseCandidate.Initialize(serializedCandidate, nullptr)) {
            auto parsedCandidate = parseCandidate.candidate();
            _transportChannel->AddRemoteCandidate(parsedCandidate);
        }
    }
}

void NetworkManager::sendPacket(const rtc::CopyOnWriteBuffer &packet) {
    auto encryptedPacket = encryptPacket(packet, _encryptionKey);
    if (encryptedPacket.has_value()) {
        rtc::PacketOptions packetOptions;
        _transportChannel->SendPacket((const char *)encryptedPacket->data(), encryptedPacket->size(), packetOptions, 0);
    }
}

void NetworkManager::candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate) {
    assert(_thread->IsCurrent());
    webrtc::JsepIceCandidate iceCandidate("", 0);
    iceCandidate.SetCandidate(candidate);
    std::string serializedCandidate;
    if (!iceCandidate.ToString(&serializedCandidate)) {
        return;
    }
    std::vector<std::string> candidates;
    candidates.push_back(serializedCandidate);

    rtc::ByteBufferWriter writer;
    writer.WriteUInt32((uint32_t)candidates.size());
    for (auto string : candidates) {
        writer.WriteUInt32((uint32_t)string.size());
        writer.WriteString(string);
    }
    std::vector<uint8_t> data;
    data.resize(writer.Length());
    memcpy(data.data(), writer.Data(), writer.Length());
    _signalingDataEmitted(data);
}

void NetworkManager::candidateGatheringState(cricket::IceTransportInternal *transport) {
    assert(_thread->IsCurrent());
}

void NetworkManager::transportStateChanged(cricket::IceTransportInternal *transport) {
    assert(_thread->IsCurrent());

    auto state = transport->GetIceTransportState();
    bool isConnected = false;
    switch (state) {
        case webrtc::IceTransportState::kConnected:
        case webrtc::IceTransportState::kCompleted:
            isConnected = true;
            break;
        default:
            break;
    }
    NetworkManager::State emitState;
    emitState.isReadyToSendData = isConnected;
    _stateUpdated(emitState);
}

void NetworkManager::transportReadyToSend(cricket::IceTransportInternal *transport) {
    assert(_thread->IsCurrent());
}

void NetworkManager::transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused) {
    assert(_thread->IsCurrent());
    rtc::CopyOnWriteBuffer packet;
    packet.AppendData(bytes, size);

    auto decryptedPacket = decryptPacket(packet, _encryptionKey);
    if (decryptedPacket.has_value()) {
        _packetReceived(decryptedPacket.value());
    }
}

#ifdef TGVOIP_NAMESPACE
}
#endif
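For reference, the wire format produced by encryptPacket() above is an MTProto-style framing: a 16-byte msgKey (the middle 16 bytes of SHA256(key slice || plaintext), which KDF2() then expands into the AES-256-IGE key and IV), followed by the ciphertext of a 2-byte length prefix, the payload, and 1 to 16 bytes of random padding. A small sketch of just the size arithmetic and the receive-side bounds checks (no crypto, so it compiles standalone):

#include <cstddef>

// Wire layout, as produced by encryptPacket():
//   [ 16-byte msgKey ][ AES-256-IGE( uint16 payloadSize | payload | padding ) ]

// Total encrypted-packet size for a given payload size.
size_t encryptedSizeFor(size_t payloadSize) {
    size_t inner = 2 + payloadSize;   // uint16 length prefix + payload
    size_t padding = 16 - inner % 16; // 1..16 random bytes, as in encryptPacket()
    return 16 + inner + padding;      // msgKey + ciphertext
}

// The receive-side sanity checks mirrored from decryptPacket().
bool plausibleEncryptedSize(size_t packetSize) {
    if (packetSize < 16 + 16) return false; // msgKey plus at least one AES block
    size_t cipherSize = packetSize - 16;
    return cipherSize % 16 == 0 && cipherSize <= 128 * 1024;
}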
@ -1,79 +0,0 @@
#ifndef TGVOIP_WEBRTC_NETWORK_MANAGER_H
#define TGVOIP_WEBRTC_NETWORK_MANAGER_H

#include "rtc_base/thread.h"

#include <functional>
#include <memory>

#include "rtc_base/copy_on_write_buffer.h"
#include "api/candidate.h"
#include "TgVoip.h"

namespace rtc {
class BasicPacketSocketFactory;
class BasicNetworkManager;
class PacketTransportInternal;
}

namespace cricket {
class BasicPortAllocator;
class P2PTransportChannel;
class IceTransportInternal;
}

namespace webrtc {
class BasicAsyncResolverFactory;
}

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class NetworkManager: public sigslot::has_slots<> {
public:
    struct State {
        bool isReadyToSendData;
    };

public:
    NetworkManager(
        rtc::Thread *thread,
        TgVoipEncryptionKey encryptionKey,
        bool enableP2P,
        std::vector<TgVoipRtcServer> const &rtcServers,
        std::function<void (const NetworkManager::State &)> stateUpdated,
        std::function<void (const rtc::CopyOnWriteBuffer &)> packetReceived,
        std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
    );
    ~NetworkManager();

    void receiveSignalingData(const rtc::CopyOnWriteBuffer &data);
    void sendPacket(const rtc::CopyOnWriteBuffer &packet);

private:
    rtc::Thread *_thread;
    TgVoipEncryptionKey _encryptionKey;
    std::function<void (const NetworkManager::State &)> _stateUpdated;
    std::function<void (const rtc::CopyOnWriteBuffer &)> _packetReceived;
    std::function<void (const std::vector<uint8_t> &)> _signalingDataEmitted;

    std::unique_ptr<rtc::BasicPacketSocketFactory> _socketFactory;
    std::unique_ptr<rtc::BasicNetworkManager> _networkManager;
    std::unique_ptr<cricket::BasicPortAllocator> _portAllocator;
    std::unique_ptr<webrtc::BasicAsyncResolverFactory> _asyncResolverFactory;
    std::unique_ptr<cricket::P2PTransportChannel> _transportChannel;

private:
    void candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate);
    void candidateGatheringState(cricket::IceTransportInternal *transport);
    void transportStateChanged(cricket::IceTransportInternal *transport);
    void transportReadyToSend(cricket::IceTransportInternal *transport);
    void transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused);
};

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
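A wiring sketch for the interface above, showing which callback carries what. sendToPeerViaServer() is a hypothetical stand-in for the application's signaling channel, and the WebRTC types are assumed to come from the same build environment the header compiles in:

#include "NetworkManager.h"

#include <memory>
#include <vector>

void sendToPeerViaServer(const std::vector<uint8_t> &data); // hypothetical

std::unique_ptr<NetworkManager> makeNetworkManager(
        rtc::Thread *thread,
        TgVoipEncryptionKey key,
        std::vector<TgVoipRtcServer> const &rtcServers) {
    return std::make_unique<NetworkManager>(
        thread,
        key,
        /*enableP2P=*/true,
        rtcServers,
        [](const NetworkManager::State &state) {
            // ICE connectivity changed; in Manager this drives
            // MediaManager::setIsConnected().
        },
        [](const rtc::CopyOnWriteBuffer &packet) {
            // A decrypted media packet; in Manager this is forwarded
            // to MediaManager::receivePacket().
        },
        [](const std::vector<uint8_t> &data) {
            // Serialized local ICE candidates; relayed to the remote peer
            // out of band, which feeds them to receiveSignalingData().
            sendToPeerViaServer(data);
        });
}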
@ -1,201 +0,0 @@
#ifndef __TGVOIP_H
#define __TGVOIP_H

#include <cstdint>
#include <functional>
#include <vector>
#include <string>
#include <memory>

namespace rtc {
template <typename VideoFrameT>
class VideoSinkInterface;
}

namespace webrtc {
class VideoFrame;
}

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

struct TgVoipProxy {
    std::string host;
    uint16_t port;
    std::string login;
    std::string password;
};

struct TgVoipRtcServer {
    std::string host;
    uint16_t port;
    std::string login;
    std::string password;
    bool isTurn;
};

enum class TgVoipEndpointType {
    Inet,
    Lan,
    UdpRelay,
    TcpRelay
};

struct TgVoipEndpointHost {
    std::string ipv4;
    std::string ipv6;
};

struct TgVoipEndpoint {
    int64_t endpointId;
    TgVoipEndpointHost host;
    uint16_t port;
    TgVoipEndpointType type;
    unsigned char peerTag[16];
};

enum class TgVoipNetworkType {
    Unknown,
    Gprs,
    Edge,
    ThirdGeneration,
    Hspa,
    Lte,
    WiFi,
    Ethernet,
    OtherHighSpeed,
    OtherLowSpeed,
    OtherMobile,
    Dialup
};

enum class TgVoipDataSaving {
    Never,
    Mobile,
    Always
};

struct TgVoipPersistentState {
    std::vector<uint8_t> value;
};

struct TgVoipConfig {
    double initializationTimeout;
    double receiveTimeout;
    TgVoipDataSaving dataSaving;
    bool enableP2P;
    bool enableAEC;
    bool enableNS;
    bool enableAGC;
    bool enableCallUpgrade;
#ifndef _WIN32
    std::string logPath;
#else
    std::wstring logPath;
#endif
    int maxApiLayer;
};

struct TgVoipEncryptionKey {
    std::vector<uint8_t> value;
    bool isOutgoing;
};

enum class TgVoipState {
    WaitInit,
    WaitInitAck,
    Established,
    Failed,
    Reconnecting
};

struct TgVoipTrafficStats {
    uint64_t bytesSentWifi;
    uint64_t bytesReceivedWifi;
    uint64_t bytesSentMobile;
    uint64_t bytesReceivedMobile;
};

struct TgVoipFinalState {
    TgVoipPersistentState persistentState;
    std::string debugLog;
    TgVoipTrafficStats trafficStats;
    bool isRatingSuggested;
};

struct TgVoipAudioDataCallbacks {
    std::function<void(int16_t*, size_t)> input;
    std::function<void(int16_t*, size_t)> output;
    std::function<void(int16_t*, size_t)> preprocessed;
};

class TgVoipVideoCaptureInterface {
protected:
    TgVoipVideoCaptureInterface() = default;
public:
    static std::shared_ptr<TgVoipVideoCaptureInterface> makeInstance();

    virtual ~TgVoipVideoCaptureInterface();

    virtual void switchCamera() = 0;
    virtual void setIsVideoEnabled(bool isVideoEnabled) = 0;
    virtual void setVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
};

class TgVoip {
protected:
    TgVoip() = default;

public:
    enum class VideoState {
        possible,
        outgoingRequested,
        incomingRequested,
        active
    };

    static void setLoggingFunction(std::function<void(std::string const &)> loggingFunction);
    static void setGlobalServerConfig(std::string const &serverConfig);
    static int getConnectionMaxLayer();
    static std::string getVersion();
    static TgVoip *makeInstance(
        TgVoipConfig const &config,
        TgVoipPersistentState const &persistentState,
        std::vector<TgVoipEndpoint> const &endpoints,
        std::unique_ptr<TgVoipProxy> const &proxy,
        std::vector<TgVoipRtcServer> const &rtcServers,
        TgVoipNetworkType initialNetworkType,
        TgVoipEncryptionKey const &encryptionKey,
        std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
        std::function<void(TgVoipState, VideoState)> stateUpdated,
        std::function<void(bool)> remoteVideoIsActiveUpdated,
        std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
    );

    virtual ~TgVoip();

    virtual void setNetworkType(TgVoipNetworkType networkType) = 0;
    virtual void setMuteMicrophone(bool muteMicrophone) = 0;
    virtual void setAudioOutputGainControlEnabled(bool enabled) = 0;
    virtual void setEchoCancellationStrength(int strength) = 0;

    virtual void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;

    virtual std::string getLastError() = 0;
    virtual std::string getDebugInfo() = 0;
    virtual int64_t getPreferredRelayId() = 0;
    virtual TgVoipTrafficStats getTrafficStats() = 0;
    virtual TgVoipPersistentState getPersistentState() = 0;

    virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
    virtual void requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) = 0;
    virtual void acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) = 0;

    virtual TgVoipFinalState stop() = 0;
};

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
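A hedged usage sketch of the interface above. Every concrete value here is illustrative, not a known-good configuration; the Manager wiring happens behind makeInstance():

#include <vector>
// Assumes "TgVoip.h" from above is on the include path.

TgVoip *startCall(std::vector<TgVoipEndpoint> const &endpoints,
                  std::vector<TgVoipRtcServer> const &rtcServers,
                  TgVoipEncryptionKey const &encryptionKey) {
    TgVoipConfig config;
    config.initializationTimeout = 30.0; // illustrative values
    config.receiveTimeout = 10.0;
    config.dataSaving = TgVoipDataSaving::Never;
    config.enableP2P = true;
    config.enableAEC = true;
    config.enableNS = true;
    config.enableAGC = true;
    config.enableCallUpgrade = false;
    config.logPath = "";
    config.maxApiLayer = TgVoip::getConnectionMaxLayer();

    return TgVoip::makeInstance(
        config,
        TgVoipPersistentState(),
        endpoints,
        nullptr, // no proxy
        rtcServers,
        TgVoipNetworkType::WiFi,
        encryptionKey,
        TgVoipVideoCaptureInterface::makeInstance(),
        [](TgVoipState state, TgVoip::VideoState videoState) { /* update call UI */ },
        [](bool remoteVideoActive) { /* show or hide the remote video surface */ },
        [](const std::vector<uint8_t> &data) { /* relay to the peer out of band */ });
}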
@ -1,408 +0,0 @@
#include <mutex>

#include "TgVoip.h"

#include "rtc_base/logging.h"

#include "Manager.h"
#include "MediaManager.h"

#include <stdarg.h>
#include <iostream>

#include "VideoCaptureInterfaceImpl.h"

#if TARGET_OS_IPHONE || TARGET_OS_OSX

#include "CodecsApple.h"

#else
#error "Unsupported platform"
#endif

#import <Foundation/Foundation.h>

#include <sys/time.h>

#ifndef TGVOIP_USE_CUSTOM_CRYPTO
/*extern "C" {
#include <openssl/sha.h>
#include <openssl/aes.h>
#include <openssl/modes.h>
#include <openssl/rand.h>
#include <openssl/crypto.h>
}

static void tgvoip_openssl_aes_ige_encrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
    AES_KEY akey;
    AES_set_encrypt_key(key, 32*8, &akey);
    AES_ige_encrypt(in, out, length, &akey, iv, AES_ENCRYPT);
}

static void tgvoip_openssl_aes_ige_decrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
    AES_KEY akey;
    AES_set_decrypt_key(key, 32*8, &akey);
    AES_ige_encrypt(in, out, length, &akey, iv, AES_DECRYPT);
}

static void tgvoip_openssl_rand_bytes(uint8_t* buffer, size_t len){
    RAND_bytes(buffer, (int)len);
}

static void tgvoip_openssl_sha1(uint8_t* msg, size_t len, uint8_t* output){
    SHA1(msg, len, output);
}

static void tgvoip_openssl_sha256(uint8_t* msg, size_t len, uint8_t* output){
    SHA256(msg, len, output);
}

static void tgvoip_openssl_aes_ctr_encrypt(uint8_t* inout, size_t length, uint8_t* key, uint8_t* iv, uint8_t* ecount, uint32_t* num){
    AES_KEY akey;
    AES_set_encrypt_key(key, 32*8, &akey);
    CRYPTO_ctr128_encrypt(inout, inout, length, &akey, iv, ecount, num, (block128_f) AES_encrypt);
}

static void tgvoip_openssl_aes_cbc_encrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
    AES_KEY akey;
    AES_set_encrypt_key(key, 256, &akey);
    AES_cbc_encrypt(in, out, length, &akey, iv, AES_ENCRYPT);
}

static void tgvoip_openssl_aes_cbc_decrypt(uint8_t* in, uint8_t* out, size_t length, uint8_t* key, uint8_t* iv){
    AES_KEY akey;
    AES_set_decrypt_key(key, 256, &akey);
    AES_cbc_encrypt(in, out, length, &akey, iv, AES_DECRYPT);
}

CryptoFunctions Layer92::crypto={
    tgvoip_openssl_rand_bytes,
    tgvoip_openssl_sha1,
    tgvoip_openssl_sha256,
    tgvoip_openssl_aes_ige_encrypt,
    tgvoip_openssl_aes_ige_decrypt,
    tgvoip_openssl_aes_ctr_encrypt,
    tgvoip_openssl_aes_cbc_encrypt,
    tgvoip_openssl_aes_cbc_decrypt
};*/
#endif

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class LogSinkImpl : public rtc::LogSink {
public:
    LogSinkImpl() {
    }
    virtual ~LogSinkImpl() {
    }

    virtual void OnLogMessage(const std::string &msg, rtc::LoggingSeverity severity, const char *tag) override {
        OnLogMessage(std::string(tag) + ": " + msg);
    }

    virtual void OnLogMessage(const std::string &message, rtc::LoggingSeverity severity) override {
        OnLogMessage(message);
    }

    virtual void OnLogMessage(const std::string &message) override {
        time_t rawTime;
        time(&rawTime);
        struct tm timeinfo;
        localtime_r(&rawTime, &timeinfo);

        timeval curTime;
        gettimeofday(&curTime, nullptr);
        int32_t milliseconds = curTime.tv_usec / 1000;

        _data << (timeinfo.tm_year + 1900);
        _data << "-" << (timeinfo.tm_mon + 1);
        _data << "-" << (timeinfo.tm_mday);
        _data << " " << timeinfo.tm_hour;
        _data << ":" << timeinfo.tm_min;
        _data << ":" << timeinfo.tm_sec;
        _data << ":" << milliseconds;
        _data << " " << message;
    }

public:
    std::ostringstream _data;
};

static rtc::Thread *makeManagerThread() {
    static std::unique_ptr<rtc::Thread> value = rtc::Thread::Create();
    value->SetName("WebRTC-Manager", nullptr);
    value->Start();
    return value.get();
}

static rtc::Thread *getManagerThread() {
    static rtc::Thread *value = makeManagerThread();
    return value;
}

class TgVoipImpl : public TgVoip, public sigslot::has_slots<> {
public:
    TgVoipImpl(
        std::vector<TgVoipEndpoint> const &endpoints,
        TgVoipPersistentState const &persistentState,
        std::unique_ptr<TgVoipProxy> const &proxy,
        std::vector<TgVoipRtcServer> const &rtcServers,
        TgVoipConfig const &config,
        TgVoipEncryptionKey const &encryptionKey,
        std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
        TgVoipNetworkType initialNetworkType,
        std::function<void(TgVoipState, TgVoip::VideoState)> stateUpdated,
        std::function<void(bool)> remoteVideoIsActiveUpdated,
        std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
    ) :
    _stateUpdated(stateUpdated),
    _signalingDataEmitted(signalingDataEmitted) {
        static dispatch_once_t onceToken;
        dispatch_once(&onceToken, ^{
            rtc::LogMessage::LogToDebug(rtc::LS_INFO);
            rtc::LogMessage::SetLogToStderr(true);
        });
        rtc::LogMessage::AddLogToStream(&_logSink, rtc::LS_INFO);

        bool enableP2P = config.enableP2P;

        _manager.reset(new ThreadLocalObject<Manager>(getManagerThread(), [encryptionKey = encryptionKey, enableP2P = enableP2P, stateUpdated, remoteVideoIsActiveUpdated, signalingDataEmitted, rtcServers, videoCapture](){
            return new Manager(
                getManagerThread(),
                encryptionKey,
                enableP2P,
                rtcServers,
                videoCapture,
                [stateUpdated](const TgVoipState &state, Manager::VideoState videoState) {
                    TgVoip::VideoState mappedVideoState;
                    switch (videoState) {
                        case Manager::VideoState::possible:
                            mappedVideoState = TgVoip::VideoState::possible;
                            break;
                        case Manager::VideoState::outgoingRequested:
                            mappedVideoState = TgVoip::VideoState::outgoingRequested;
                            break;
                        case Manager::VideoState::incomingRequested:
                            mappedVideoState = TgVoip::VideoState::incomingRequested;
                            break;
                        case Manager::VideoState::active:
                            mappedVideoState = TgVoip::VideoState::active;
                            break;
                    }
                    stateUpdated(state, mappedVideoState);
                },
                [remoteVideoIsActiveUpdated](bool isActive) {
                    remoteVideoIsActiveUpdated(isActive);
                },
                [signalingDataEmitted](const std::vector<uint8_t> &data) {
                    signalingDataEmitted(data);
                }
            );
        }));
        _manager->perform([](Manager *manager) {
            manager->start();
        });
    }

    ~TgVoipImpl() override {
        rtc::LogMessage::RemoveLogToStream(&_logSink);
    }

    void receiveSignalingData(const std::vector<uint8_t> &data) override {
        _manager->perform([data](Manager *manager) {
            manager->receiveSignalingData(data);
        });
    }

    virtual void requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) override {
        _manager->perform([videoCapture](Manager *manager) {
            manager->requestVideo(videoCapture);
        });
    }

    virtual void acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) override {
        _manager->perform([videoCapture](Manager *manager) {
            manager->acceptVideo(videoCapture);
        });
    }

    void setNetworkType(TgVoipNetworkType networkType) override {
        /*message::NetworkType mappedType;

        switch (networkType) {
            case TgVoipNetworkType::Unknown:
                mappedType = message::NetworkType::nUnknown;
                break;
            case TgVoipNetworkType::Gprs:
                mappedType = message::NetworkType::nGprs;
                break;
            case TgVoipNetworkType::Edge:
                mappedType = message::NetworkType::nEdge;
                break;
            case TgVoipNetworkType::ThirdGeneration:
                mappedType = message::NetworkType::n3gOrAbove;
                break;
            case TgVoipNetworkType::Hspa:
                mappedType = message::NetworkType::n3gOrAbove;
                break;
            case TgVoipNetworkType::Lte:
                mappedType = message::NetworkType::n3gOrAbove;
                break;
            case TgVoipNetworkType::WiFi:
                mappedType = message::NetworkType::nHighSpeed;
                break;
            case TgVoipNetworkType::Ethernet:
                mappedType = message::NetworkType::nHighSpeed;
                break;
            case TgVoipNetworkType::OtherHighSpeed:
                mappedType = message::NetworkType::nHighSpeed;
                break;
            case TgVoipNetworkType::OtherLowSpeed:
                mappedType = message::NetworkType::nEdge;
                break;
            case TgVoipNetworkType::OtherMobile:
                mappedType = message::NetworkType::n3gOrAbove;
                break;
            case TgVoipNetworkType::Dialup:
                mappedType = message::NetworkType::nGprs;
                break;
            default:
                mappedType = message::NetworkType::nUnknown;
                break;
        }

        controller_->SetNetworkType(mappedType);*/
    }

    void setMuteMicrophone(bool muteMicrophone) override {
        _manager->perform([muteMicrophone](Manager *manager) {
            manager->setMuteOutgoingAudio(muteMicrophone);
        });
    }

    void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override {
        _manager->perform([sink](Manager *manager) {
            manager->setIncomingVideoOutput(sink);
        });
    }

    void setAudioOutputGainControlEnabled(bool enabled) override {
    }

    void setEchoCancellationStrength(int strength) override {
    }

    std::string getLastError() override {
        return ""; // TODO: not implemented
    }

    std::string getDebugInfo() override {
        return ""; // TODO: not implemented
    }

    int64_t getPreferredRelayId() override {
        return 0; // we don't have endpoint ids
    }

    TgVoipTrafficStats getTrafficStats() override {
        return TgVoipTrafficStats{}; // TODO: not implemented
    }

    TgVoipPersistentState getPersistentState() override {
        return TgVoipPersistentState{}; // we don't have such information
    }

    TgVoipFinalState stop() override {
        TgVoipFinalState finalState;
        finalState.debugLog = _logSink._data.str();
        finalState.isRatingSuggested = false;

        return finalState;
    }

private:
    std::unique_ptr<ThreadLocalObject<Manager>> _manager;
    std::function<void(TgVoipState, TgVoip::VideoState)> _stateUpdated;
    std::function<void(const std::vector<uint8_t> &)> _signalingDataEmitted;

    LogSinkImpl _logSink;
};

std::function<void(std::string const &)> globalLoggingFunction;

void __tgvoip_call_tglog(const char *format, ...){
    va_list vaArgs;
    va_start(vaArgs, format);

    va_list vaCopy;
    va_copy(vaCopy, vaArgs);
    const int length = std::vsnprintf(nullptr, 0, format, vaCopy);
    va_end(vaCopy);

    std::vector<char> zc(length + 1);
    std::vsnprintf(zc.data(), zc.size(), format, vaArgs);
    va_end(vaArgs);

    if (globalLoggingFunction != nullptr) {
        globalLoggingFunction(std::string(zc.data(), length)); // exclude the trailing NUL
    }
}

void TgVoip::setLoggingFunction(std::function<void(std::string const &)> loggingFunction) {
    globalLoggingFunction = loggingFunction;
}

void TgVoip::setGlobalServerConfig(const std::string &serverConfig) {
}

int TgVoip::getConnectionMaxLayer() {
    return 92;
}

std::string TgVoip::getVersion() {
    return "";
}

TgVoip *TgVoip::makeInstance(
    TgVoipConfig const &config,
    TgVoipPersistentState const &persistentState,
    std::vector<TgVoipEndpoint> const &endpoints,
    std::unique_ptr<TgVoipProxy> const &proxy,
    std::vector<TgVoipRtcServer> const &rtcServers,
    TgVoipNetworkType initialNetworkType,
    TgVoipEncryptionKey const &encryptionKey,
    std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture,
    std::function<void(TgVoipState, TgVoip::VideoState)> stateUpdated,
    std::function<void(bool)> remoteVideoIsActiveUpdated,
    std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
) {
    return new TgVoipImpl(
        endpoints,
        persistentState,
        proxy,
        rtcServers,
        config,
        encryptionKey,
        videoCapture,
        initialNetworkType,
        stateUpdated,
        remoteVideoIsActiveUpdated,
        signalingDataEmitted
    );
}

TgVoip::~TgVoip() = default;

std::shared_ptr<TgVoipVideoCaptureInterface> TgVoipVideoCaptureInterface::makeInstance() {
    return std::shared_ptr<TgVoipVideoCaptureInterface>(new TgVoipVideoCaptureInterfaceImpl());
}

TgVoipVideoCaptureInterface::~TgVoipVideoCaptureInterface() = default;

#ifdef TGVOIP_NAMESPACE
}
#endif
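One practical note on the implementation above: __tgvoip_call_tglog() measures the formatted length with a first vsnprintf pass before allocating, and everything it produces goes to the function installed via setLoggingFunction(). Hooking it up is one call; the stderr sink below is purely illustrative:

#include <cstdio>
#include <string>

void installTgVoipLogging() {
    // Forward every library log line to stderr with a prefix.
    TgVoip::setLoggingFunction([](std::string const &line) {
        std::fprintf(stderr, "tgvoip: %s\n", line.c_str());
    });
}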
@ -1 +0,0 @@
@ -1,61 +0,0 @@
#ifndef TGVOIP_WEBRTC_THREAD_LOCAL_OBJECT_H
#define TGVOIP_WEBRTC_THREAD_LOCAL_OBJECT_H

#include "rtc_base/thread.h"

#include <functional>
#include <memory>

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

template<class T>
class ThreadLocalObject {
private:
    template<class TV>
    class ValueHolder {
    public:
        std::shared_ptr<TV> _value;
    };

public:
    ThreadLocalObject(rtc::Thread *thread, std::function<T *()> generator) :
    _thread(thread),
    _valueHolder(new ThreadLocalObject::ValueHolder<T>()) {
        assert(_thread != nullptr);
        _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder, generator](){
            valueHolder->_value.reset(generator());
        });
    }

    ~ThreadLocalObject() {
        _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder](){
            valueHolder->_value.reset();
        });
    }

    template <class FunctorT>
    void perform(FunctorT&& functor) {
        _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder, f = std::forward<FunctorT>(functor)](){
            assert(valueHolder->_value != nullptr);
            f(valueHolder->_value.get());
        });
    }

    T *getSyncAssumingSameThread() {
        assert(_thread->IsCurrent());
        assert(_valueHolder->_value != nullptr);
        return _valueHolder->_value.get();
    }

private:
    rtc::Thread *_thread;
    std::shared_ptr<ValueHolder<T>> _valueHolder;
};

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
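Usage sketch for the template above: the owned object is created, used, and destroyed only on the owning rtc::Thread, so callers never need a lock. Counter is a hypothetical payload type:

#include <cstdio>

struct Counter {
    int value = 0;
};

void example(rtc::Thread *workerThread) {
    // The generator runs on workerThread via PostTask, not here.
    ThreadLocalObject<Counter> counter(workerThread, []() {
        return new Counter();
    });
    // perform() hops to workerThread before touching the value.
    counter.perform([](Counter *c) {
        c->value++;
        std::printf("value = %d\n", c->value);
    });
    // ~ThreadLocalObject posts the deletion back to workerThread as well.
}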
@ -1,24 +0,0 @@
#ifndef VIDEOCAMERACAPTURER_H
#define VIDEOCAMERACAPTURER_H

#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>

#include <memory>
#include "api/scoped_refptr.h"
#include "api/media_stream_interface.h"

@interface VideoCameraCapturer : NSObject

+ (NSArray<AVCaptureDevice *> *)captureDevices;
+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device;

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source isActiveUpdated:(void (^)(bool))isActiveUpdated;

- (void)startCaptureWithDevice:(AVCaptureDevice *)device format:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps;
- (void)stopCapture;
- (void)setIsEnabled:(bool)isEnabled;

@end

#endif
@ -1,492 +0,0 @@
#include "VideoCameraCapturer.h"

#import <AVFoundation/AVFoundation.h>

#import "base/RTCLogging.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#import "sdk/objc/native/src/objc_video_track_source.h"
#import "api/video_track_source_proxy.h"

#import "helpers/UIDevice+RTCDevice.h"

#import "helpers/AVCaptureSession+DevicePosition.h"
#import "helpers/RTCDispatcher+Private.h"
#import "base/RTCVideoFrame.h"

static const int64_t kNanosecondsPerSecond = 1000000000;

static webrtc::ObjCVideoTrackSource *getObjCVideoSource(const rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> nativeSource) {
    webrtc::VideoTrackSourceProxy *proxy_source =
        static_cast<webrtc::VideoTrackSourceProxy *>(nativeSource.get());
    return static_cast<webrtc::ObjCVideoTrackSource *>(proxy_source->internal());
}

@interface VideoCameraCapturer () <AVCaptureVideoDataOutputSampleBufferDelegate> {
    rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _source;

    dispatch_queue_t _frameQueue;
    AVCaptureDevice *_currentDevice;
    BOOL _hasRetriedOnFatalError;
    BOOL _isRunning;
    BOOL _willBeRunning;

    AVCaptureVideoDataOutput *_videoDataOutput;
    AVCaptureSession *_captureSession;
    FourCharCode _preferredOutputPixelFormat;
    FourCharCode _outputPixelFormat;
    RTCVideoRotation _rotation;
    UIDeviceOrientation _orientation;

    void (^_isActiveUpdated)(bool);
    bool _isActiveValue;
    bool _inForegroundValue;
    bool _isPaused;
}

@end

@implementation VideoCameraCapturer

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source isActiveUpdated:(void (^)(bool))isActiveUpdated {
    self = [super init];
    if (self != nil) {
        _source = source;
        _isActiveValue = true;
        _inForegroundValue = true;
        _isPaused = false;
        _isActiveUpdated = [isActiveUpdated copy];

        if (![self setupCaptureSession:[[AVCaptureSession alloc] init]]) {
            return nil;
        }

        NSNotificationCenter *center = [NSNotificationCenter defaultCenter];
        _orientation = UIDeviceOrientationPortrait;
        _rotation = RTCVideoRotation_90;
        [center addObserver:self
                   selector:@selector(deviceOrientationDidChange:)
                       name:UIDeviceOrientationDidChangeNotification
                     object:nil];
        [center addObserver:self
                   selector:@selector(handleCaptureSessionInterruption:)
                       name:AVCaptureSessionWasInterruptedNotification
                     object:_captureSession];
        [center addObserver:self
                   selector:@selector(handleCaptureSessionInterruptionEnded:)
                       name:AVCaptureSessionInterruptionEndedNotification
                     object:_captureSession];
        [center addObserver:self
                   selector:@selector(handleApplicationDidBecomeActive:)
                       name:UIApplicationDidBecomeActiveNotification
                     object:[UIApplication sharedApplication]];
        [center addObserver:self
                   selector:@selector(handleCaptureSessionRuntimeError:)
                       name:AVCaptureSessionRuntimeErrorNotification
                     object:_captureSession];
        [center addObserver:self
                   selector:@selector(handleCaptureSessionDidStartRunning:)
                       name:AVCaptureSessionDidStartRunningNotification
                     object:_captureSession];
        [center addObserver:self
                   selector:@selector(handleCaptureSessionDidStopRunning:)
                       name:AVCaptureSessionDidStopRunningNotification
                     object:_captureSession];
    }
    return self;
}

- (void)dealloc {
    NSAssert(!_willBeRunning, @"Session was still running in RTCCameraVideoCapturer dealloc. Forgot to call stopCapture?");
    [[NSNotificationCenter defaultCenter] removeObserver:self];
}

+ (NSArray<AVCaptureDevice *> *)captureDevices {
    AVCaptureDeviceDiscoverySession *session = [AVCaptureDeviceDiscoverySession
        discoverySessionWithDeviceTypes:@[ AVCaptureDeviceTypeBuiltInWideAngleCamera ]
                              mediaType:AVMediaTypeVideo
                               position:AVCaptureDevicePositionUnspecified];
    return session.devices;
}

+ (NSArray<AVCaptureDeviceFormat *> *)supportedFormatsForDevice:(AVCaptureDevice *)device {
    // Support opening the device in any format. We make sure it's converted to a format we
    // can handle, if needed, in the method `-setupVideoDataOutput`.
    return device.formats;
}

- (FourCharCode)preferredOutputPixelFormat {
    return _preferredOutputPixelFormat;
}

- (void)startCaptureWithDevice:(AVCaptureDevice *)device
                        format:(AVCaptureDeviceFormat *)format
                           fps:(NSInteger)fps {
    [self startCaptureWithDevice:device format:format fps:fps completionHandler:nil];
}

- (void)stopCapture {
    _isActiveUpdated = nil;
    [self stopCaptureWithCompletionHandler:nil];
}

- (void)setIsEnabled:(bool)isEnabled {
    _isPaused = !isEnabled;
    [self updateIsActiveValue];
}

- (void)startCaptureWithDevice:(AVCaptureDevice *)device
                        format:(AVCaptureDeviceFormat *)format
                           fps:(NSInteger)fps
             completionHandler:(nullable void (^)(NSError *))completionHandler {
    _willBeRunning = YES;
    [RTCDispatcher
        dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                      block:^{
        RTCLogInfo("startCaptureWithDevice %@ @ %ld fps", format, (long)fps);

        dispatch_async(dispatch_get_main_queue(), ^{
            [[UIDevice currentDevice] beginGeneratingDeviceOrientationNotifications];
        });

        _currentDevice = device;

        NSError *error = nil;
        if (![_currentDevice lockForConfiguration:&error]) {
            RTCLogError(@"Failed to lock device %@. Error: %@",
                        _currentDevice,
                        error.userInfo);
            if (completionHandler) {
                completionHandler(error);
            }
            _willBeRunning = NO;
            return;
        }
        [self reconfigureCaptureSessionInput];
        [self updateOrientation];
        [self updateDeviceCaptureFormat:format fps:fps];
        [self updateVideoDataOutputPixelFormat:format];
        [_captureSession startRunning];
        [_currentDevice unlockForConfiguration];
        _isRunning = YES;
        if (completionHandler) {
            completionHandler(nil);
        }
    }];
}

- (void)stopCaptureWithCompletionHandler:(nullable void (^)(void))completionHandler {
    _willBeRunning = NO;
    [RTCDispatcher
        dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                      block:^{
        RTCLogInfo("Stop");
        _currentDevice = nil;
        for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
            [_captureSession removeInput:oldInput];
        }
        [_captureSession stopRunning];

        dispatch_async(dispatch_get_main_queue(), ^{
            [[UIDevice currentDevice] endGeneratingDeviceOrientationNotifications];
        });
        _isRunning = NO;
        if (completionHandler) {
            completionHandler();
        }
    }];
}

#pragma mark iOS notifications

#if TARGET_OS_IPHONE
- (void)deviceOrientationDidChange:(NSNotification *)notification {
    [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                                 block:^{
        [self updateOrientation];
    }];
}
#endif

#pragma mark AVCaptureVideoDataOutputSampleBufferDelegate

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
           fromConnection:(AVCaptureConnection *)connection {
    NSParameterAssert(captureOutput == _videoDataOutput);

    if (CMSampleBufferGetNumSamples(sampleBuffer) != 1 || !CMSampleBufferIsValid(sampleBuffer) ||
        !CMSampleBufferDataIsReady(sampleBuffer)) {
        return;
    }

    CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    if (pixelBuffer == nil) {
        return;
    }

    // Default to portrait orientation on iPhone.
    BOOL usingFrontCamera = NO;
    // Check the image's EXIF for the camera the image came from as the image could have been
    // delayed as we set alwaysDiscardsLateVideoFrames to NO.
    AVCaptureDevicePosition cameraPosition =
        [AVCaptureSession devicePositionForSampleBuffer:sampleBuffer];
    if (cameraPosition != AVCaptureDevicePositionUnspecified) {
        usingFrontCamera = AVCaptureDevicePositionFront == cameraPosition;
    } else {
        AVCaptureDeviceInput *deviceInput =
            (AVCaptureDeviceInput *)((AVCaptureInputPort *)connection.inputPorts.firstObject).input;
        usingFrontCamera = AVCaptureDevicePositionFront == deviceInput.device.position;
    }
    switch (_orientation) {
        case UIDeviceOrientationPortrait:
            _rotation = RTCVideoRotation_90;
            break;
        case UIDeviceOrientationPortraitUpsideDown:
            _rotation = RTCVideoRotation_270;
            break;
        case UIDeviceOrientationLandscapeLeft:
            _rotation = usingFrontCamera ? RTCVideoRotation_180 : RTCVideoRotation_0;
            break;
        case UIDeviceOrientationLandscapeRight:
            _rotation = usingFrontCamera ? RTCVideoRotation_0 : RTCVideoRotation_180;
            break;
        case UIDeviceOrientationFaceUp:
        case UIDeviceOrientationFaceDown:
        case UIDeviceOrientationUnknown:
            // Ignore.
            break;
    }

    RTCCVPixelBuffer *rtcPixelBuffer = [[RTCCVPixelBuffer alloc] initWithPixelBuffer:pixelBuffer];
    int64_t timeStampNs = CMTimeGetSeconds(CMSampleBufferGetPresentationTimeStamp(sampleBuffer)) *
        kNanosecondsPerSecond;
    RTCVideoFrame *videoFrame = [[RTCVideoFrame alloc] initWithBuffer:rtcPixelBuffer
                                                             rotation:_rotation
                                                          timeStampNs:timeStampNs];
    if (!_isPaused) {
        getObjCVideoSource(_source)->OnCapturedFrame(videoFrame);
    }
}

- (void)captureOutput:(AVCaptureOutput *)captureOutput
    didDropSampleBuffer:(CMSampleBufferRef)sampleBuffer
         fromConnection:(AVCaptureConnection *)connection {
    NSString *droppedReason =
        (__bridge NSString *)CMGetAttachment(sampleBuffer, kCMSampleBufferAttachmentKey_DroppedFrameReason, nil);
    RTCLogError(@"Dropped sample buffer. Reason: %@", droppedReason);
}

#pragma mark - AVCaptureSession notifications

- (void)handleCaptureSessionInterruption:(NSNotification *)notification {
    NSString *reasonString = nil;
    NSNumber *reason = notification.userInfo[AVCaptureSessionInterruptionReasonKey];
    if (reason) {
        switch (reason.intValue) {
            case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableInBackground:
                reasonString = @"VideoDeviceNotAvailableInBackground";
                break;
            case AVCaptureSessionInterruptionReasonAudioDeviceInUseByAnotherClient:
                reasonString = @"AudioDeviceInUseByAnotherClient";
                break;
            case AVCaptureSessionInterruptionReasonVideoDeviceInUseByAnotherClient:
                reasonString = @"VideoDeviceInUseByAnotherClient";
                break;
            case AVCaptureSessionInterruptionReasonVideoDeviceNotAvailableWithMultipleForegroundApps:
                reasonString = @"VideoDeviceNotAvailableWithMultipleForegroundApps";
                break;
        }
    }
    RTCLog(@"Capture session interrupted: %@", reasonString);
}

- (void)handleCaptureSessionInterruptionEnded:(NSNotification *)notification {
    RTCLog(@"Capture session interruption ended.");
}

- (void)handleCaptureSessionRuntimeError:(NSNotification *)notification {
    NSError *error = [notification.userInfo objectForKey:AVCaptureSessionErrorKey];
    RTCLogError(@"Capture session runtime error: %@", error);

    [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                                 block:^{
        if (error.code == AVErrorMediaServicesWereReset) {
            [self handleNonFatalError];
        } else {
            [self handleFatalError];
        }
    }];
}

- (void)handleCaptureSessionDidStartRunning:(NSNotification *)notification {
    RTCLog(@"Capture session started.");

    [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                                 block:^{
        // If we successfully restarted after an unknown error,
        // allow future retries on fatal errors.
        _hasRetriedOnFatalError = NO;
    }];

    _inForegroundValue = true;
    [self updateIsActiveValue];
}

- (void)handleCaptureSessionDidStopRunning:(NSNotification *)notification {
    RTCLog(@"Capture session stopped.");
    _inForegroundValue = false;
    [self updateIsActiveValue];
}

- (void)updateIsActiveValue {
    bool isActive = _inForegroundValue && !_isPaused;
    if (isActive != _isActiveValue) {
        _isActiveValue = isActive;
        if (_isActiveUpdated) {
            _isActiveUpdated(_isActiveValue);
        }
    }
}

- (void)handleFatalError {
    [RTCDispatcher
        dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                      block:^{
        if (!_hasRetriedOnFatalError) {
            RTCLogWarning(@"Attempting to recover from fatal capture error.");
            [self handleNonFatalError];
            _hasRetriedOnFatalError = YES;
        } else {
            RTCLogError(@"Previous fatal error recovery failed.");
        }
    }];
}

- (void)handleNonFatalError {
    [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                                 block:^{
        RTCLog(@"Restarting capture session after error.");
        if (_isRunning) {
            [_captureSession startRunning];
        }
    }];
}

#pragma mark - UIApplication notifications

- (void)handleApplicationDidBecomeActive:(NSNotification *)notification {
    [RTCDispatcher dispatchAsyncOnType:RTCDispatcherTypeCaptureSession
                                 block:^{
        if (_isRunning && !_captureSession.isRunning) {
            RTCLog(@"Restarting capture session on active.");
            [_captureSession startRunning];
        }
    }];
}

#pragma mark - Private

- (dispatch_queue_t)frameQueue {
    if (!_frameQueue) {
        _frameQueue =
            dispatch_queue_create("org.webrtc.cameravideocapturer.video", DISPATCH_QUEUE_SERIAL);
        dispatch_set_target_queue(_frameQueue,
                                  dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_HIGH, 0));
    }
    return _frameQueue;
}

- (BOOL)setupCaptureSession:(AVCaptureSession *)captureSession {
    NSAssert(_captureSession == nil, @"Setup capture session called twice.");
    _captureSession = captureSession;
    _captureSession.sessionPreset = AVCaptureSessionPresetInputPriority;
    _captureSession.usesApplicationAudioSession = NO;
    [self setupVideoDataOutput];
    // Add the output.
    if (![_captureSession canAddOutput:_videoDataOutput]) {
        RTCLogError(@"Video data output unsupported.");
        return NO;
    }
    [_captureSession addOutput:_videoDataOutput];

    return YES;
}

- (void)setupVideoDataOutput {
    NSAssert(_videoDataOutput == nil, @"Setup video data output called twice.");
    AVCaptureVideoDataOutput *videoDataOutput = [[AVCaptureVideoDataOutput alloc] init];

    // `videoDataOutput.availableVideoCVPixelFormatTypes` returns the pixel formats supported by the
    // device with the most efficient output format first. Find the first format that we support.
    NSSet<NSNumber *> *supportedPixelFormats = [RTCCVPixelBuffer supportedPixelFormats];
    NSMutableOrderedSet *availablePixelFormats =
        [NSMutableOrderedSet orderedSetWithArray:videoDataOutput.availableVideoCVPixelFormatTypes];
    [availablePixelFormats intersectSet:supportedPixelFormats];
    NSNumber *pixelFormat = availablePixelFormats.firstObject;
    NSAssert(pixelFormat, @"Output device has no supported formats.");

    _preferredOutputPixelFormat = [pixelFormat unsignedIntValue];
    _outputPixelFormat = _preferredOutputPixelFormat;
    videoDataOutput.videoSettings = @{(NSString *)kCVPixelBufferPixelFormatTypeKey : pixelFormat};
    videoDataOutput.alwaysDiscardsLateVideoFrames = NO;
    [videoDataOutput setSampleBufferDelegate:self queue:self.frameQueue];
    _videoDataOutput = videoDataOutput;
}

- (void)updateVideoDataOutputPixelFormat:(AVCaptureDeviceFormat *)format {
    FourCharCode mediaSubType = CMFormatDescriptionGetMediaSubType(format.formatDescription);
    if (![[RTCCVPixelBuffer supportedPixelFormats] containsObject:@(mediaSubType)]) {
        mediaSubType = _preferredOutputPixelFormat;
    }

    if (mediaSubType != _outputPixelFormat) {
        _outputPixelFormat = mediaSubType;
        _videoDataOutput.videoSettings =
            @{ (NSString *)kCVPixelBufferPixelFormatTypeKey : @(mediaSubType) };
    }
}

#pragma mark - Private, called inside capture queue

- (void)updateDeviceCaptureFormat:(AVCaptureDeviceFormat *)format fps:(NSInteger)fps {
    NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
             @"updateDeviceCaptureFormat must be called on the capture queue.");
    @try {
        _currentDevice.activeFormat = format;
        _currentDevice.activeVideoMinFrameDuration = CMTimeMake(1, (int32_t)fps);
    } @catch (NSException *exception) {
        RTCLogError(@"Failed to set active format!\n User info:%@", exception.userInfo);
        return;
    }
}

- (void)reconfigureCaptureSessionInput {
    NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
             @"reconfigureCaptureSessionInput must be called on the capture queue.");
    NSError *error = nil;
    AVCaptureDeviceInput *input =
        [AVCaptureDeviceInput deviceInputWithDevice:_currentDevice error:&error];
    if (!input) {
        RTCLogError(@"Failed to create front camera input: %@", error.localizedDescription);
        return;
    }
    [_captureSession beginConfiguration];
    for (AVCaptureDeviceInput *oldInput in [_captureSession.inputs copy]) {
        [_captureSession removeInput:oldInput];
    }
    if ([_captureSession canAddInput:input]) {
        [_captureSession addInput:input];
    } else {
        RTCLogError(@"Cannot add camera as an input to the session.");
    }
    [_captureSession commitConfiguration];
}

- (void)updateOrientation {
    NSAssert([RTCDispatcher isOnQueueForType:RTCDispatcherTypeCaptureSession],
             @"updateOrientation must be called on the capture queue.");
    _orientation = [UIDevice currentDevice].orientation;
}

@end
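// Editor's note: the sample-buffer delegate above maps device orientation
// plus camera position to a WebRTC frame rotation. Restated as a standalone
// sketch (the function name is illustrative; the enum values and the
// front-camera mirroring rule come from the switch in
// -captureOutput:didOutputSampleBuffer:fromConnection:).
static RTCVideoRotation rotationSketch(UIDeviceOrientation orientation,
                                       bool usingFrontCamera,
                                       RTCVideoRotation current) {
    switch (orientation) {
        case UIDeviceOrientationPortrait:
            return RTCVideoRotation_90;
        case UIDeviceOrientationPortraitUpsideDown:
            return RTCVideoRotation_270;
        case UIDeviceOrientationLandscapeLeft:
            // Front-camera frames are mirrored, hence the 180-degree flip.
            return usingFrontCamera ? RTCVideoRotation_180 : RTCVideoRotation_0;
        case UIDeviceOrientationLandscapeRight:
            return usingFrontCamera ? RTCVideoRotation_0 : RTCVideoRotation_180;
        default:
            // FaceUp/FaceDown/Unknown: keep the last known rotation.
            return current;
    }
}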
@ -1,53 +0,0 @@
#ifndef VIDEO_CAPTURE_INTERFACE_IMPL_H
#define VIDEO_CAPTURE_INTERFACE_IMPL_H

#include "TgVoip.h"
#include <memory>
#include "ThreadLocalObject.h"
#include "api/media_stream_interface.h"

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

class VideoCapturerInterface;

class TgVoipVideoCaptureInterfaceObject {
public:
    TgVoipVideoCaptureInterfaceObject();
    ~TgVoipVideoCaptureInterfaceObject();

    void switchCamera();
    void setIsVideoEnabled(bool isVideoEnabled);
    void setVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setIsActiveUpdated(std::function<void (bool)> isActiveUpdated);

public:
    rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> _videoSource;
    std::unique_ptr<VideoCapturerInterface> _videoCapturer;

private:
    std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> _currentSink;
    std::function<void (bool)> _isActiveUpdated;
    bool _useFrontCamera;
    bool _isVideoEnabled;
};

class TgVoipVideoCaptureInterfaceImpl : public TgVoipVideoCaptureInterface {
public:
    TgVoipVideoCaptureInterfaceImpl();
    virtual ~TgVoipVideoCaptureInterfaceImpl();

    virtual void switchCamera();
    virtual void setIsVideoEnabled(bool isVideoEnabled);
    virtual void setVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

public:
    std::unique_ptr<ThreadLocalObject<TgVoipVideoCaptureInterfaceObject>> _impl;
};

#ifdef TGVOIP_NAMESPACE
}
#endif

#endif
@ -1,90 +0,0 @@
#include "VideoCaptureInterfaceImpl.h"

#include "CodecsApple.h"
#include "Manager.h"
#include "MediaManager.h"

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

TgVoipVideoCaptureInterfaceObject::TgVoipVideoCaptureInterfaceObject() {
    _useFrontCamera = true;
    _isVideoEnabled = true;
    _videoSource = makeVideoSource(Manager::getMediaThread(), MediaManager::getWorkerThread());
    //this should outlive the capturer
    _videoCapturer = makeVideoCapturer(_videoSource, _useFrontCamera, [this](bool isActive) {
        if (this->_isActiveUpdated) {
            this->_isActiveUpdated(isActive);
        }
    });
}

TgVoipVideoCaptureInterfaceObject::~TgVoipVideoCaptureInterfaceObject() {
    if (_currentSink != nullptr) {
        _videoSource->RemoveSink(_currentSink.get());
    }
}

void TgVoipVideoCaptureInterfaceObject::switchCamera() {
    _useFrontCamera = !_useFrontCamera;
    _videoCapturer = makeVideoCapturer(_videoSource, _useFrontCamera, [this](bool isActive) {
        if (this->_isActiveUpdated) {
            this->_isActiveUpdated(isActive);
        }
    });
}

void TgVoipVideoCaptureInterfaceObject::setIsVideoEnabled(bool isVideoEnabled) {
    if (_isVideoEnabled != isVideoEnabled) {
        _isVideoEnabled = isVideoEnabled;
        _videoCapturer->setIsEnabled(isVideoEnabled);
    }
}

void TgVoipVideoCaptureInterfaceObject::setVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    if (_currentSink != nullptr) {
        _videoSource->RemoveSink(_currentSink.get());
    }
    _currentSink = sink;
    if (_currentSink != nullptr) {
        _videoSource->AddOrUpdateSink(_currentSink.get(), rtc::VideoSinkWants());
    }
}

void TgVoipVideoCaptureInterfaceObject::setIsActiveUpdated(std::function<void (bool)> isActiveUpdated) {
    _isActiveUpdated = isActiveUpdated;
}

TgVoipVideoCaptureInterfaceImpl::TgVoipVideoCaptureInterfaceImpl() {
    _impl.reset(new ThreadLocalObject<TgVoipVideoCaptureInterfaceObject>(
        Manager::getMediaThread(),
        []() {
            return new TgVoipVideoCaptureInterfaceObject();
        }
    ));
}

TgVoipVideoCaptureInterfaceImpl::~TgVoipVideoCaptureInterfaceImpl() {
}

void TgVoipVideoCaptureInterfaceImpl::switchCamera() {
    _impl->perform([](TgVoipVideoCaptureInterfaceObject *impl) {
        impl->switchCamera();
    });
}

void TgVoipVideoCaptureInterfaceImpl::setIsVideoEnabled(bool isVideoEnabled) {
    _impl->perform([isVideoEnabled](TgVoipVideoCaptureInterfaceObject *impl) {
        impl->setIsVideoEnabled(isVideoEnabled);
    });
}

void TgVoipVideoCaptureInterfaceImpl::setVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _impl->perform([sink](TgVoipVideoCaptureInterfaceObject *impl) {
        impl->setVideoOutput(sink);
    });
}

}
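// Editor's note: TgVoipVideoCaptureInterfaceImpl is a thin marshalling layer;
// each public call hops onto the media thread via ThreadLocalObject::perform.
// A hedged usage sketch follows (SketchSink is hypothetical; the interface
// calls are the ones declared in VideoCaptureInterfaceImpl.h above).
class SketchSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
    void OnFrame(const webrtc::VideoFrame &frame) override {
        // Consume captured frames here (render, encode, ...).
        (void)frame;
    }
};

void captureUsageSketch() {
    auto capture = std::make_unique<TgVoipVideoCaptureInterfaceImpl>();
    auto sink = std::make_shared<SketchSink>();

    capture->setVideoOutput(sink);    // queued onto the media thread
    capture->setIsVideoEnabled(true); // pauses/resumes frame delivery
    capture->switchCamera();          // recreates the platform capturer
}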
@ -1,27 +0,0 @@
#ifndef VIDEOMETALVIEW_H
#define VIDEOMETALVIEW_H

#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>

#import "api/media_stream_interface.h"
#import <TgVoip/OngoingCallThreadLocalContext.h>

#include <memory>

@class RTCVideoFrame;

@interface VideoMetalView : OngoingCallThreadLocalContextWebrtcVideoView

@property(nonatomic) UIViewContentMode videoContentMode;
@property(nonatomic, getter=isEnabled) BOOL enabled;
@property(nonatomic, nullable) NSValue* rotationOverride;

- (void)setSize:(CGSize)size;
- (void)renderFrame:(nullable RTCVideoFrame *)frame;

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink;

@end

#endif
@ -1,298 +0,0 @@
#import "VideoMetalView.h"

#import <Metal/Metal.h>
#import <MetalKit/MetalKit.h>

#import "base/RTCLogging.h"
#import "base/RTCVideoFrame.h"
#import "base/RTCVideoFrameBuffer.h"
#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
#include "sdk/objc/native/api/video_frame.h"

#import "api/video/video_sink_interface.h"
#import "api/media_stream_interface.h"

#import "RTCMTLI420Renderer.h"
#import "RTCMTLNV12Renderer.h"
#import "RTCMTLRGBRenderer.h"

#define MTKViewClass NSClassFromString(@"MTKView")
#define RTCMTLNV12RendererClass NSClassFromString(@"RTCMTLNV12Renderer")
#define RTCMTLI420RendererClass NSClassFromString(@"RTCMTLI420Renderer")
#define RTCMTLRGBRendererClass NSClassFromString(@"RTCMTLRGBRenderer")

class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
public:
    VideoRendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *)) {
        _frameReceived = [frameReceived copy];
    }

    void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
        RTCVideoFrame* videoFrame = NativeToObjCVideoFrame(nativeVideoFrame);

        CGSize currentSize = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);

        if (_frameReceived) {
            _frameReceived(currentSize, videoFrame);
        }
    }

private:
    void (^_frameReceived)(CGSize, RTCVideoFrame *);
};

@interface VideoMetalView () <MTKViewDelegate> {
    RTCMTLI420Renderer *_rendererI420;
    RTCMTLNV12Renderer *_rendererNV12;
    RTCMTLRGBRenderer *_rendererRGB;
    MTKView *_metalView;
    RTCVideoFrame *_videoFrame;
    CGSize _videoFrameSize;
    int64_t _lastFrameTimeNs;

    CGSize _currentSize;
    std::shared_ptr<VideoRendererAdapterImpl> _sink;

    void (^_onFirstFrameReceived)();
    bool _firstFrameReceivedReported;
}

@end

@implementation VideoMetalView

- (instancetype)initWithFrame:(CGRect)frameRect {
    self = [super initWithFrame:frameRect];
    if (self) {
        [self configure];

        _currentSize = CGSizeZero;

        __weak VideoMetalView *weakSelf = self;
        _sink.reset(new VideoRendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame) {
            dispatch_async(dispatch_get_main_queue(), ^{
                __strong VideoMetalView *strongSelf = weakSelf;
                if (strongSelf == nil) {
                    return;
                }
                if (!CGSizeEqualToSize(size, strongSelf->_currentSize)) {
                    strongSelf->_currentSize = size;
                    [strongSelf setSize:size];
                }

                [strongSelf renderFrame:videoFrame];
            });
        }));
    }
    return self;
}

- (BOOL)isEnabled {
    return !_metalView.paused;
}

- (void)setEnabled:(BOOL)enabled {
    _metalView.paused = !enabled;
}

- (UIViewContentMode)videoContentMode {
    return _metalView.contentMode;
}

- (void)setVideoContentMode:(UIViewContentMode)mode {
    _metalView.contentMode = mode;
}

#pragma mark - Private

+ (BOOL)isMetalAvailable {
    return MTLCreateSystemDefaultDevice() != nil;
}

+ (MTKView *)createMetalView:(CGRect)frame {
    return [[MTKViewClass alloc] initWithFrame:frame];
}

+ (RTCMTLNV12Renderer *)createNV12Renderer {
    return [[RTCMTLNV12RendererClass alloc] init];
}

+ (RTCMTLI420Renderer *)createI420Renderer {
    return [[RTCMTLI420RendererClass alloc] init];
}

+ (RTCMTLRGBRenderer *)createRGBRenderer {
    return [[RTCMTLRGBRenderer alloc] init];
}

- (void)configure {
    NSAssert([VideoMetalView isMetalAvailable], @"Metal not availiable on this device");

    _metalView = [VideoMetalView createMetalView:self.bounds];
    _metalView.delegate = self;
    _metalView.contentMode = UIViewContentModeScaleAspectFill;
    [self addSubview:_metalView];
    _videoFrameSize = CGSizeZero;
}

- (void)setMultipleTouchEnabled:(BOOL)multipleTouchEnabled {
    [super setMultipleTouchEnabled:multipleTouchEnabled];
    _metalView.multipleTouchEnabled = multipleTouchEnabled;
}

- (void)layoutSubviews {
    [super layoutSubviews];

    CGRect bounds = self.bounds;
    _metalView.frame = bounds;
    if (!CGSizeEqualToSize(_videoFrameSize, CGSizeZero)) {
        _metalView.drawableSize = [self drawableSize];
    } else {
        _metalView.drawableSize = bounds.size;
    }
}

#pragma mark - MTKViewDelegate methods

- (void)drawInMTKView:(nonnull MTKView *)view {
    NSAssert(view == _metalView, @"Receiving draw callbacks from foreign instance.");
    RTCVideoFrame *videoFrame = _videoFrame;
    // Skip rendering if we've already rendered this frame.
    if (!videoFrame || videoFrame.timeStampNs == _lastFrameTimeNs) {
        return;
    }

    if (CGRectIsEmpty(view.bounds)) {
        return;
    }

    RTCMTLRenderer *renderer;
    if ([videoFrame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) {
        RTCCVPixelBuffer *buffer = (RTCCVPixelBuffer*)videoFrame.buffer;
        const OSType pixelFormat = CVPixelBufferGetPixelFormatType(buffer.pixelBuffer);
        if (pixelFormat == kCVPixelFormatType_32BGRA || pixelFormat == kCVPixelFormatType_32ARGB) {
            if (!_rendererRGB) {
                _rendererRGB = [VideoMetalView createRGBRenderer];
                if (![_rendererRGB addRenderingDestination:_metalView]) {
                    _rendererRGB = nil;
                    RTCLogError(@"Failed to create RGB renderer");
                    return;
                }
            }
            renderer = _rendererRGB;
        } else {
            if (!_rendererNV12) {
                _rendererNV12 = [VideoMetalView createNV12Renderer];
                if (![_rendererNV12 addRenderingDestination:_metalView]) {
                    _rendererNV12 = nil;
                    RTCLogError(@"Failed to create NV12 renderer");
                    return;
                }
            }
            renderer = _rendererNV12;
        }
    } else {
        if (!_rendererI420) {
            _rendererI420 = [VideoMetalView createI420Renderer];
            if (![_rendererI420 addRenderingDestination:_metalView]) {
                _rendererI420 = nil;
                RTCLogError(@"Failed to create I420 renderer");
                return;
            }
        }
        renderer = _rendererI420;
    }

    renderer.rotationOverride = _rotationOverride;

    [renderer drawFrame:videoFrame];
    _lastFrameTimeNs = videoFrame.timeStampNs;
}

- (void)mtkView:(MTKView *)view drawableSizeWillChange:(CGSize)size {
}

#pragma mark -

- (void)setRotationOverride:(NSValue *)rotationOverride {
    _rotationOverride = rotationOverride;

    _metalView.drawableSize = [self drawableSize];
    [self setNeedsLayout];
}

- (RTCVideoRotation)frameRotation {
    if (_rotationOverride) {
        RTCVideoRotation rotation;
        if (@available(iOS 11, *)) {
            [_rotationOverride getValue:&rotation size:sizeof(rotation)];
        } else {
            [_rotationOverride getValue:&rotation];
        }
        return rotation;
    }

    return _videoFrame.rotation;
}

- (CGSize)drawableSize {
    // Flip width/height if the rotations are not the same.
    CGSize videoFrameSize = _videoFrameSize;
    RTCVideoRotation frameRotation = [self frameRotation];

    BOOL useLandscape =
        (frameRotation == RTCVideoRotation_0) || (frameRotation == RTCVideoRotation_180);
    BOOL sizeIsLandscape = (_videoFrame.rotation == RTCVideoRotation_0) ||
        (_videoFrame.rotation == RTCVideoRotation_180);

    if (useLandscape == sizeIsLandscape) {
        return videoFrameSize;
    } else {
        return CGSizeMake(videoFrameSize.height, videoFrameSize.width);
    }
}

#pragma mark - RTCVideoRenderer

- (void)setSize:(CGSize)size {
    assert([NSThread isMainThread]);

    _videoFrameSize = size;
    CGSize drawableSize = [self drawableSize];

    _metalView.drawableSize = drawableSize;
    [self setNeedsLayout];
    //[strongSelf.delegate videoView:self didChangeVideoSize:size];
}

- (void)renderFrame:(nullable RTCVideoFrame *)frame {
    assert([NSThread isMainThread]);

    if (!_firstFrameReceivedReported && _onFirstFrameReceived) {
        _firstFrameReceivedReported = true;
        _onFirstFrameReceived();
    }

    if (!self.isEnabled) {
        return;
    }

    if (frame == nil) {
        RTCLogInfo(@"Incoming frame is nil. Exiting render callback.");
        return;
    }
    _videoFrame = frame;
}

- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink {
    assert([NSThread isMainThread]);

    return _sink;
}

- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived {
    _onFirstFrameReceived = [onFirstFrameReceived copy];
    _firstFrameReceivedReported = false;
}

@end
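// Editor's note: in -drawableSize above, the Metal drawable is sized in the
// frame's own pixel space, so width and height swap whenever the effective
// rotation and the rotation baked into _videoFrameSize disagree about
// orientation. Restated as a sketch (the function name is illustrative):
static CGSize drawableSizeSketch(CGSize frameSize,
                                 RTCVideoRotation effectiveRotation,
                                 RTCVideoRotation frameRotation) {
    bool effectiveIsLandscape = (effectiveRotation == RTCVideoRotation_0) ||
        (effectiveRotation == RTCVideoRotation_180);
    bool sizeIsLandscape = (frameRotation == RTCVideoRotation_0) ||
        (frameRotation == RTCVideoRotation_180);
    if (effectiveIsLandscape == sizeIsLandscape) {
        return frameSize;
    }
    // Orientations disagree: swap dimensions.
    return CGSizeMake(frameSize.height, frameSize.width);
}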
@ -79,7 +79,7 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
|
|||||||
|
|
||||||
@end
|
@end
|
||||||
|
|
||||||
@interface OngoingCallThreadLocalContextWebrtcVideoView : UIView
|
@protocol OngoingCallThreadLocalContextWebrtcVideoView <NSObject>
|
||||||
|
|
||||||
- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived;
|
- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived;
|
||||||
|
|
||||||
@ -92,7 +92,7 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
|
|||||||
- (void)switchVideoCamera;
|
- (void)switchVideoCamera;
|
||||||
- (void)setIsVideoEnabled:(bool)isVideoEnabled;
|
- (void)setIsVideoEnabled:(bool)isVideoEnabled;
|
||||||
|
|
||||||
- (void)makeOutgoingVideoView:(void (^_Nonnull)(OngoingCallThreadLocalContextWebrtcVideoView * _Nullable))completion;
|
- (void)makeOutgoingVideoView:(void (^_Nonnull)(UIView<OngoingCallThreadLocalContextWebrtcVideoView> * _Nullable))completion;
|
||||||
|
|
||||||
@end
|
@end
|
||||||
|
|
||||||
@ -117,7 +117,7 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
|
|||||||
|
|
||||||
- (void)setIsMuted:(bool)isMuted;
|
- (void)setIsMuted:(bool)isMuted;
|
||||||
- (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType;
|
- (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType;
|
||||||
- (void)makeIncomingVideoView:(void (^_Nonnull)(OngoingCallThreadLocalContextWebrtcVideoView * _Nullable))completion;
|
- (void)makeIncomingVideoView:(void (^_Nonnull)(UIView<OngoingCallThreadLocalContextWebrtcVideoView> * _Nullable))completion;
|
||||||
- (void)requestVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer;
|
- (void)requestVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer;
|
||||||
- (void)acceptVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer;
|
- (void)acceptVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer;
|
||||||
- (void)addSignalingData:(NSData * _Nonnull)data;
|
- (void)addSignalingData:(NSData * _Nonnull)data;
|
||||||
|
@ -1,9 +1,11 @@
|
|||||||
#import <TgVoip/OngoingCallThreadLocalContext.h>
|
#import <TgVoip/OngoingCallThreadLocalContext.h>
|
||||||
|
|
||||||
#import "TgVoip.h"
|
#import "Instance.h"
|
||||||
#import "VideoMetalView.h"
|
#import "InstanceImpl.h"
|
||||||
|
#import "VideoCaptureInterface.h"
|
||||||
|
|
||||||
using namespace TGVOIP_NAMESPACE;
|
#import "platform/darwin/VideoMetalView.h"
|
||||||
|
#import "platform/darwin/GLVideoView.h"
|
||||||
|
|
||||||
@implementation OngoingCallConnectionDescriptionWebrtc
|
@implementation OngoingCallConnectionDescriptionWebrtc
|
||||||
|
|
||||||
@ -22,17 +24,33 @@ using namespace TGVOIP_NAMESPACE;
|
|||||||
@end
|
@end
|
||||||
|
|
||||||
@interface OngoingCallThreadLocalContextVideoCapturer () {
|
@interface OngoingCallThreadLocalContextVideoCapturer () {
|
||||||
std::shared_ptr<TgVoipVideoCaptureInterface> _interface;
|
std::shared_ptr<tgcalls::VideoCaptureInterface> _interface;
|
||||||
}
|
}
|
||||||
|
|
||||||
@end
|
@end
|
||||||
|
|
||||||
|
@interface VideoMetalView (VideoViewImpl) <OngoingCallThreadLocalContextWebrtcVideoView>
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
|
@implementation VideoMetalView (VideoViewImpl)
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
|
@interface GLVideoView (VideoViewImpl) <OngoingCallThreadLocalContextWebrtcVideoView>
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
|
@implementation GLVideoView (VideoViewImpl)
|
||||||
|
|
||||||
|
@end
|
||||||
|
|
||||||
@implementation OngoingCallThreadLocalContextVideoCapturer
|
@implementation OngoingCallThreadLocalContextVideoCapturer
|
||||||
|
|
||||||
- (instancetype _Nonnull)init {
|
- (instancetype _Nonnull)init {
|
||||||
self = [super init];
|
self = [super init];
|
||||||
if (self != nil) {
|
if (self != nil) {
|
||||||
_interface = TgVoipVideoCaptureInterface::makeInstance();
|
_interface = tgcalls::CreateVideoCapture();
|
||||||
}
|
}
|
||||||
return self;
|
return self;
|
||||||
}
|
}
|
||||||
@ -45,20 +63,29 @@ using namespace TGVOIP_NAMESPACE;
|
|||||||
_interface->setIsVideoEnabled(isVideoEnabled);
|
_interface->setIsVideoEnabled(isVideoEnabled);
|
||||||
}
|
}
|
||||||
|
|
||||||
- (std::shared_ptr<TgVoipVideoCaptureInterface>)getInterface {
|
- (std::shared_ptr<tgcalls::VideoCaptureInterface>)getInterface {
|
||||||
return _interface;
|
return _interface;
|
||||||
}
|
}
|
||||||
|
|
||||||
- (void)makeOutgoingVideoView:(void (^_Nonnull)(OngoingCallThreadLocalContextWebrtcVideoView * _Nullable))completion {
|
- (void)makeOutgoingVideoView:(void (^_Nonnull)(UIView<OngoingCallThreadLocalContextWebrtcVideoView> * _Nullable))completion {
|
||||||
std::shared_ptr<TgVoipVideoCaptureInterface> interface = _interface;
|
std::shared_ptr<tgcalls::VideoCaptureInterface> interface = _interface;
|
||||||
dispatch_async(dispatch_get_main_queue(), ^{
|
dispatch_async(dispatch_get_main_queue(), ^{
|
||||||
VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectZero];
|
if ([VideoMetalView isSupported]) {
|
||||||
remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
|
VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectZero];
|
||||||
|
remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
|
||||||
|
|
||||||
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
|
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
|
||||||
interface->setVideoOutput(sink);
|
interface->setVideoOutput(sink);
|
||||||
|
|
||||||
completion(remoteRenderer);
|
completion(remoteRenderer);
|
||||||
|
} else {
|
||||||
|
GLVideoView *remoteRenderer = [[GLVideoView alloc] initWithFrame:CGRectZero];
|
||||||
|
|
||||||
|
std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
|
||||||
|
interface->setVideoOutput(sink);
|
||||||
|
|
||||||
|
completion(remoteRenderer);
|
||||||
|
}
|
||||||
});
|
});
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -74,7 +101,7 @@ using namespace TGVOIP_NAMESPACE;
|
|||||||
NSTimeInterval _callConnectTimeout;
|
NSTimeInterval _callConnectTimeout;
|
||||||
NSTimeInterval _callPacketTimeout;
|
NSTimeInterval _callPacketTimeout;
|
||||||
|
|
||||||
TgVoip *_tgVoip;
|
std::unique_ptr<tgcalls::Instance> _tgVoip;
|
||||||
|
|
||||||
OngoingCallStateWebrtc _state;
|
OngoingCallStateWebrtc _state;
|
||||||
OngoingCallVideoStateWebrtc _videoState;
|
OngoingCallVideoStateWebrtc _videoState;
|
||||||
@ -88,7 +115,7 @@ using namespace TGVOIP_NAMESPACE;
|
|||||||
void (^_sendSignalingData)(NSData *);
|
void (^_sendSignalingData)(NSData *);
|
||||||
}
|
}
|
||||||
|
|
||||||
- (void)controllerStateChanged:(TgVoipState)state videoState:(OngoingCallVideoStateWebrtc)videoState;
|
- (void)controllerStateChanged:(tgcalls::State)state videoState:(OngoingCallVideoStateWebrtc)videoState;
|
||||||
- (void)signalBarsChanged:(int32_t)signalBars;
|
- (void)signalBarsChanged:(int32_t)signalBars;
|
||||||
|
|
||||||
@end
|
@end
|
||||||
@ -124,31 +151,31 @@ using namespace TGVOIP_NAMESPACE;
|
|||||||
|
|
||||||
@end
|
@end
|
||||||
|
|
||||||
static TgVoipNetworkType callControllerNetworkTypeForType(OngoingCallNetworkTypeWebrtc type) {
|
static tgcalls::NetworkType callControllerNetworkTypeForType(OngoingCallNetworkTypeWebrtc type) {
|
||||||
switch (type) {
|
switch (type) {
|
||||||
case OngoingCallNetworkTypeWifi:
|
case OngoingCallNetworkTypeWifi:
|
||||||
return TgVoipNetworkType::WiFi;
|
return tgcalls::NetworkType::WiFi;
|
||||||
case OngoingCallNetworkTypeCellularGprs:
|
case OngoingCallNetworkTypeCellularGprs:
|
||||||
return TgVoipNetworkType::Gprs;
|
return tgcalls::NetworkType::Gprs;
|
||||||
case OngoingCallNetworkTypeCellular3g:
|
case OngoingCallNetworkTypeCellular3g:
|
||||||
return TgVoipNetworkType::ThirdGeneration;
|
return tgcalls::NetworkType::ThirdGeneration;
|
||||||
case OngoingCallNetworkTypeCellularLte:
|
case OngoingCallNetworkTypeCellularLte:
|
||||||
return TgVoipNetworkType::Lte;
|
return tgcalls::NetworkType::Lte;
|
||||||
default:
|
default:
|
||||||
return TgVoipNetworkType::ThirdGeneration;
|
return tgcalls::NetworkType::ThirdGeneration;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
static TgVoipDataSaving callControllerDataSavingForType(OngoingCallDataSavingWebrtc type) {
|
static tgcalls::DataSaving callControllerDataSavingForType(OngoingCallDataSavingWebrtc type) {
|
||||||
switch (type) {
|
switch (type) {
|
||||||
case OngoingCallDataSavingNever:
|
case OngoingCallDataSavingNever:
|
||||||
return TgVoipDataSaving::Never;
|
return tgcalls::DataSaving::Never;
|
||||||
case OngoingCallDataSavingCellular:
|
case OngoingCallDataSavingCellular:
|
||||||
return TgVoipDataSaving::Mobile;
|
return tgcalls::DataSaving::Mobile;
|
||||||
case OngoingCallDataSavingAlways:
|
case OngoingCallDataSavingAlways:
|
||||||
return TgVoipDataSaving::Always;
|
return tgcalls::DataSaving::Always;
|
||||||
default:
|
default:
|
||||||
return TgVoipDataSaving::Never;
|
return tgcalls::DataSaving::Never;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -158,7 +185,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
|
|
||||||
+ (void)setupLoggingFunction:(void (*)(NSString *))loggingFunction {
|
+ (void)setupLoggingFunction:(void (*)(NSString *))loggingFunction {
|
||||||
InternalVoipLoggingFunction = loggingFunction;
|
InternalVoipLoggingFunction = loggingFunction;
|
||||||
TgVoip::setLoggingFunction([](std::string const &string) {
|
tgcalls::SetLoggingFunction([](std::string const &string) {
|
||||||
if (InternalVoipLoggingFunction) {
|
if (InternalVoipLoggingFunction) {
|
||||||
InternalVoipLoggingFunction([[NSString alloc] initWithUTF8String:string.c_str()]);
|
InternalVoipLoggingFunction([[NSString alloc] initWithUTF8String:string.c_str()]);
|
||||||
}
|
}
|
||||||
@ -167,7 +194,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
|
|
||||||
+ (void)applyServerConfig:(NSString *)string {
|
+ (void)applyServerConfig:(NSString *)string {
|
||||||
if (string.length != 0) {
|
if (string.length != 0) {
|
||||||
TgVoip::setGlobalServerConfig(std::string(string.UTF8String));
|
//TgVoip::setGlobalServerConfig(std::string(string.UTF8String));
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -204,19 +231,19 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
derivedStateValue.resize(derivedState.length);
|
derivedStateValue.resize(derivedState.length);
|
||||||
[derivedState getBytes:derivedStateValue.data() length:derivedState.length];
|
[derivedState getBytes:derivedStateValue.data() length:derivedState.length];
|
||||||
|
|
||||||
std::unique_ptr<TgVoipProxy> proxyValue = nullptr;
|
std::unique_ptr<tgcalls::Proxy> proxyValue = nullptr;
|
||||||
if (proxy != nil) {
|
if (proxy != nil) {
|
||||||
TgVoipProxy *proxyObject = new TgVoipProxy();
|
tgcalls::Proxy *proxyObject = new tgcalls::Proxy();
|
||||||
proxyObject->host = proxy.host.UTF8String;
|
proxyObject->host = proxy.host.UTF8String;
|
||||||
proxyObject->port = (uint16_t)proxy.port;
|
proxyObject->port = (uint16_t)proxy.port;
|
||||||
proxyObject->login = proxy.username.UTF8String ?: "";
|
proxyObject->login = proxy.username.UTF8String ?: "";
|
||||||
proxyObject->password = proxy.password.UTF8String ?: "";
|
proxyObject->password = proxy.password.UTF8String ?: "";
|
||||||
proxyValue = std::unique_ptr<TgVoipProxy>(proxyObject);
|
proxyValue = std::unique_ptr<tgcalls::Proxy>(proxyObject);
|
||||||
}
|
}
|
||||||
|
|
||||||
std::vector<TgVoipRtcServer> parsedRtcServers;
|
std::vector<tgcalls::RtcServer> parsedRtcServers;
|
||||||
for (VoipRtcServerWebrtc *server in rtcServers) {
|
for (VoipRtcServerWebrtc *server in rtcServers) {
|
||||||
parsedRtcServers.push_back((TgVoipRtcServer){
|
parsedRtcServers.push_back((tgcalls::RtcServer){
|
||||||
.host = server.host.UTF8String,
|
.host = server.host.UTF8String,
|
||||||
.port = (uint16_t)server.port,
|
.port = (uint16_t)server.port,
|
||||||
.login = server.username.UTF8String,
|
.login = server.username.UTF8String,
|
||||||
@ -233,25 +260,25 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
crypto.aes_ige_decrypt = &TGCallAesIgeDecrypt;
|
crypto.aes_ige_decrypt = &TGCallAesIgeDecrypt;
|
||||||
crypto.aes_ctr_encrypt = &TGCallAesCtrEncrypt;*/
|
crypto.aes_ctr_encrypt = &TGCallAesCtrEncrypt;*/
|
||||||
|
|
||||||
std::vector<TgVoipEndpoint> endpoints;
|
std::vector<tgcalls::Endpoint> endpoints;
|
||||||
NSArray<OngoingCallConnectionDescriptionWebrtc *> *connections = [@[primaryConnection] arrayByAddingObjectsFromArray:alternativeConnections];
|
NSArray<OngoingCallConnectionDescriptionWebrtc *> *connections = [@[primaryConnection] arrayByAddingObjectsFromArray:alternativeConnections];
|
||||||
for (OngoingCallConnectionDescriptionWebrtc *connection in connections) {
|
for (OngoingCallConnectionDescriptionWebrtc *connection in connections) {
|
||||||
unsigned char peerTag[16];
|
unsigned char peerTag[16];
|
||||||
[connection.peerTag getBytes:peerTag length:16];
|
[connection.peerTag getBytes:peerTag length:16];
|
||||||
|
|
||||||
TgVoipEndpoint endpoint;
|
tgcalls::Endpoint endpoint;
|
||||||
endpoint.endpointId = connection.connectionId;
|
endpoint.endpointId = connection.connectionId;
|
||||||
endpoint.host = {
|
endpoint.host = {
|
||||||
.ipv4 = std::string(connection.ip.UTF8String),
|
.ipv4 = std::string(connection.ip.UTF8String),
|
||||||
.ipv6 = std::string(connection.ipv6.UTF8String)
|
.ipv6 = std::string(connection.ipv6.UTF8String)
|
||||||
};
|
};
|
||||||
endpoint.port = (uint16_t)connection.port;
|
endpoint.port = (uint16_t)connection.port;
|
||||||
endpoint.type = TgVoipEndpointType::UdpRelay;
|
endpoint.type = tgcalls::EndpointType::UdpRelay;
|
||||||
memcpy(endpoint.peerTag, peerTag, 16);
|
memcpy(endpoint.peerTag, peerTag, 16);
|
||||||
endpoints.push_back(endpoint);
|
endpoints.push_back(endpoint);
|
||||||
}
|
}
|
||||||
|
|
||||||
TgVoipConfig config = {
|
tgcalls::Config config = {
|
||||||
.initializationTimeout = _callConnectTimeout,
|
.initializationTimeout = _callConnectTimeout,
|
||||||
.receiveTimeout = _callPacketTimeout,
|
.receiveTimeout = _callPacketTimeout,
|
||||||
.dataSaving = callControllerDataSavingForType(dataSaving),
|
.dataSaving = callControllerDataSavingForType(dataSaving),
|
||||||
@ -268,37 +295,38 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
encryptionKeyValue.resize(key.length);
|
encryptionKeyValue.resize(key.length);
|
||||||
memcpy(encryptionKeyValue.data(), key.bytes, key.length);
|
memcpy(encryptionKeyValue.data(), key.bytes, key.length);
|
||||||
|
|
||||||
TgVoipEncryptionKey encryptionKey = {
|
tgcalls::EncryptionKey encryptionKey(encryptionKeyValue, isOutgoing);
|
||||||
.value = encryptionKeyValue,
|
|
||||||
.isOutgoing = isOutgoing,
|
|
||||||
};
|
|
||||||
|
|
||||||
__weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
|
__weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
|
||||||
_tgVoip = TgVoip::makeInstance(
|
static dispatch_once_t onceToken;
|
||||||
config,
|
dispatch_once(&onceToken, ^{
|
||||||
{ derivedStateValue },
|
tgcalls::Register<tgcalls::InstanceImpl>();
|
||||||
endpoints,
|
});
|
||||||
proxyValue,
|
_tgVoip = tgcalls::Meta::Create("2.7.7", (tgcalls::Descriptor){
|
||||||
parsedRtcServers,
|
.config = config,
|
||||||
callControllerNetworkTypeForType(networkType),
|
.persistentState = (tgcalls::PersistentState){ derivedStateValue },
|
||||||
encryptionKey,
|
.endpoints = endpoints,
|
||||||
[_videoCapturer getInterface],
|
.proxy = std::move(proxyValue),
|
||||||
[weakSelf, queue](TgVoipState state, TgVoip::VideoState videoState) {
|
.rtcServers = parsedRtcServers,
|
||||||
|
.initialNetworkType = callControllerNetworkTypeForType(networkType),
|
||||||
|
.encryptionKey = encryptionKey,
|
||||||
|
.videoCapture = [_videoCapturer getInterface],
|
||||||
|
.stateUpdated = [weakSelf, queue](tgcalls::State state, tgcalls::VideoState videoState) {
|
||||||
[queue dispatch:^{
|
[queue dispatch:^{
|
||||||
__strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
|
__strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
|
||||||
if (strongSelf) {
|
if (strongSelf) {
|
||||||
OngoingCallVideoStateWebrtc mappedVideoState;
|
OngoingCallVideoStateWebrtc mappedVideoState;
|
||||||
switch (videoState) {
|
switch (videoState) {
|
||||||
case TgVoip::VideoState::possible:
|
case tgcalls::VideoState::Possible:
|
||||||
mappedVideoState = OngoingCallVideoStatePossible;
|
mappedVideoState = OngoingCallVideoStatePossible;
|
||||||
break;
|
break;
|
||||||
case TgVoip::VideoState::outgoingRequested:
|
case tgcalls::VideoState::OutgoingRequested:
|
||||||
mappedVideoState = OngoingCallVideoStateOutgoingRequested;
|
mappedVideoState = OngoingCallVideoStateOutgoingRequested;
|
||||||
break;
|
break;
|
||||||
case TgVoip::VideoState::incomingRequested:
|
case tgcalls::VideoState::IncomingRequested:
|
||||||
mappedVideoState = OngoingCallVideoStateIncomingRequested;
|
mappedVideoState = OngoingCallVideoStateIncomingRequested;
|
||||||
break;
|
break;
|
||||||
case TgVoip::VideoState::active:
|
case tgcalls::VideoState::Active:
|
||||||
mappedVideoState = OngoingCallVideoStateActive;
|
mappedVideoState = OngoingCallVideoStateActive;
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -307,7 +335,10 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
}
|
}
|
||||||
}];
|
}];
|
||||||
},
|
},
|
||||||
[weakSelf, queue](bool isActive) {
|
.signalBarsUpdated = [](int value) {
|
||||||
|
|
||||||
|
},
|
||||||
|
.remoteVideoIsActiveUpdated = [weakSelf, queue](bool isActive) {
|
||||||
[queue dispatch:^{
|
[queue dispatch:^{
|
||||||
__strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
|
__strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
|
||||||
if (strongSelf) {
|
if (strongSelf) {
|
||||||
@ -326,8 +357,8 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
}
|
}
|
||||||
}];
|
}];
|
||||||
},
|
},
|
||||||
[weakSelf, queue](const std::vector<uint8_t> &data) {
|
.signalingDataEmitted = [weakSelf, queue](const std::vector<uint8_t> &data) {
|
||||||
NSData *mappedData = [[NSData alloc] initWithBytes:data.data() length:data.size()];
|
NSData *mappedData = [[NSData alloc] initWithBytes:data.data() length:data.size()];
|
||||||
[queue dispatch:^{
|
[queue dispatch:^{
|
||||||
__strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
|
__strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
|
||||||
if (strongSelf) {
|
if (strongSelf) {
|
||||||
@ -335,7 +366,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
|||||||
}
|
}
|
||||||
}];
|
}];
|
||||||
}
|
}
|
||||||
);
|
});
|
||||||
|
|
||||||
_state = OngoingCallStateInitializing;
|
_state = OngoingCallStateInitializing;
|
||||||
_signalBars = -1;
|
_signalBars = -1;
|
||||||
@@ -356,14 +387,11 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
 
 - (void)stop:(void (^)(NSString *, int64_t, int64_t, int64_t, int64_t))completion {
     if (_tgVoip) {
-        TgVoipFinalState finalState = _tgVoip->stop();
+        tgcalls::FinalState finalState = _tgVoip->stop();
         
         NSString *debugLog = [NSString stringWithUTF8String:finalState.debugLog.c_str()];
         _lastDerivedState = [[NSData alloc] initWithBytes:finalState.persistentState.value.data() length:finalState.persistentState.value.size()];
         
-        delete _tgVoip;
-        _tgVoip = NULL;
-        
         if (completion) {
             completion(debugLog, finalState.trafficStats.bytesSentWifi, finalState.trafficStats.bytesReceivedWifi, finalState.trafficStats.bytesSentMobile, finalState.trafficStats.bytesReceivedMobile);
         }
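
The manual delete _tgVoip; _tgVoip = NULL; pair disappears in the hunk above. A plausible reading, not confirmed by the visible lines, is that ownership of the C++ instance moved to a smart pointer, so teardown collapses into the owner; a sketch under that assumption:

    #include <memory>
    
    // Assumption: _tgVoip is now a std::unique_ptr member rather than a raw
    // pointer, and tgcalls::Instance names the wrapped C++ call object.
    std::unique_ptr<tgcalls::Instance> _tgVoip;
    
    void stopCall() {
        if (_tgVoip) {
            tgcalls::FinalState finalState = _tgVoip->stop();
            _tgVoip.reset(); // destructor runs here; no manual delete/NULL
            // finalState.debugLog, .persistentState and .trafficStats remain
            // valid: they were returned by value before the instance died.
            (void)finalState;
        }
    }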
@@ -371,7 +399,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     }
 
 - (NSString *)debugInfo {
-    if (_tgVoip != nil) {
+    if (_tgVoip != nullptr) {
         NSString *version = [self version];
         return [NSString stringWithFormat:@"WebRTC, Version: %@", version];
         //auto rawDebugString = _tgVoip->getDebugInfo();
@@ -382,11 +410,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     }
 
 - (NSString *)version {
-    if (_tgVoip != nil) {
-        return @"2.7.7";//[NSString stringWithUTF8String:_tgVoip->getVersion().c_str()];
-    } else {
-        return nil;
-    }
+    return @"2.7.7";
 }
 
 - (NSData * _Nonnull)getDerivedState {
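
The nil → nullptr change in debugInfo is a small Objective-C++ hygiene fix: _tgVoip points at a C++ object, not an Objective-C one, so nullptr is the matching null constant. A two-line illustration (the concrete types here are assumed):

    tgcalls::Instance *cxxPointer = nullptr; // C++ pointer: compare with nullptr
    NSObject *objcPointer = nil;             // Objective-C pointer: compare with nil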
@@ -400,16 +424,16 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     }
 }
 
-- (void)controllerStateChanged:(TgVoipState)state videoState:(OngoingCallVideoStateWebrtc)videoState {
+- (void)controllerStateChanged:(tgcalls::State)state videoState:(OngoingCallVideoStateWebrtc)videoState {
     OngoingCallStateWebrtc callState = OngoingCallStateInitializing;
     switch (state) {
-        case TgVoipState::Estabilished:
+        case tgcalls::State::Established:
             callState = OngoingCallStateConnected;
             break;
-        case TgVoipState::Failed:
+        case tgcalls::State::Failed:
             callState = OngoingCallStateFailed;
             break;
-        case TgVoipState::Reconnecting:
+        case tgcalls::State::Reconnecting:
             callState = OngoingCallStateReconnecting;
             break;
         default:
@@ -466,20 +490,32 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     }
 }
 
-- (void)makeIncomingVideoView:(void (^_Nonnull)(OngoingCallThreadLocalContextWebrtcVideoView * _Nullable))completion {
+- (void)makeIncomingVideoView:(void (^_Nonnull)(UIView<OngoingCallThreadLocalContextWebrtcVideoView> * _Nullable))completion {
     if (_tgVoip) {
         __weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
         dispatch_async(dispatch_get_main_queue(), ^{
-            VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectZero];
-            remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
-            
-            std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
-            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
-            if (strongSelf) {
-                strongSelf->_tgVoip->setIncomingVideoOutput(sink);
-            }
-            
-            completion(remoteRenderer);
+            if ([VideoMetalView isSupported]) {
+                VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectZero];
+                remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
+                
+                std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
+                __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
+                if (strongSelf) {
+                    strongSelf->_tgVoip->setIncomingVideoOutput(sink);
+                }
+                
+                completion(remoteRenderer);
+            } else {
+                GLVideoView *remoteRenderer = [[GLVideoView alloc] initWithFrame:CGRectZero];
+                
+                std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
+                __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
+                if (strongSelf) {
+                    strongSelf->_tgVoip->setIncomingVideoOutput(sink);
+                }
+                
+                completion(remoteRenderer);
+            }
         });
     }
 }
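
The hunk above makes the incoming-video view conditional: a Metal-backed renderer where supported, with an OpenGL view as the fallback, and the completion type widens to UIView<OngoingCallThreadLocalContextWebrtcVideoView> so either class can be handed back. Since the sink wiring is duplicated across the two branches, one possible refactor is to pick the renderer first and share the rest; a sketch, assuming both classes adopt the protocol and expose getSink as shown above:

    // Hypothetical refactor of the branch above: choose the renderer class,
    // then run the shared sink wiring once.
    UIView<OngoingCallThreadLocalContextWebrtcVideoView> *remoteRenderer = nil;
    std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink;
    if ([VideoMetalView isSupported]) {
        VideoMetalView *metalView = [[VideoMetalView alloc] initWithFrame:CGRectZero];
        metalView.videoContentMode = UIViewContentModeScaleAspectFill;
        sink = [metalView getSink];
        remoteRenderer = metalView;
    } else {
        GLVideoView *glView = [[GLVideoView alloc] initWithFrame:CGRectZero];
        sink = [glView getSink];
        remoteRenderer = glView;
    }
    __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
    if (strongSelf) {
        strongSelf->_tgVoip->setIncomingVideoOutput(sink);
    }
    completion(remoteRenderer);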
@@ -499,10 +535,3 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     }
 
 @end
-
-@implementation OngoingCallThreadLocalContextWebrtcVideoView : UIView
-
-- (void)setOnFirstFrameReceived:(void (^ _Nullable)())onFirstFrameReceived {
-}
-
-@end
1	submodules/TgVoipWebrtc/tgcalls	Submodule
@@ -0,0 +1 @@
+Subproject commit 659712186b39c3f077e3ad091d1de036154064a7