commit a38fb8b93f
Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios

@@ -106,6 +106,7 @@ public protocol PresentationCall: class {
     func toggleIsMuted()
     func setIsMuted(_ value: Bool)
     func requestVideo()
+    func acceptVideo()
     func setOutgoingVideoIsPaused(_ isPaused: Bool)
     func switchVideoCamera()
     func setCurrentAudioOutput(_ output: AudioSessionOutput)

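The new acceptVideo() requirement completes a two-sided upgrade handshake: requestVideo() asks the peer to enable video, acceptVideo() confirms a request coming from the peer. A minimal sketch of the lifecycle implied by the videoState values used throughout this commit; the event names and the transition function are illustrative additions, not part of the change:

// States mirror the videoState cases in this diff; events are assumptions.
enum CallVideoState {
    case notAvailable       // video unsupported for this call
    case possible           // video could start, nobody asked yet
    case outgoingRequested  // we sent an upgrade request
    case incomingRequested  // the peer asked us to enable video
    case active             // both sides confirmed
}

enum CallVideoEvent {
    case localRequest   // user starts video, maps to requestVideo()
    case localAccept    // user confirms an incoming request, maps to acceptVideo()
    case remoteAccept   // peer confirmed our request over signaling
}

func transition(_ state: CallVideoState, on event: CallVideoEvent) -> CallVideoState {
    switch (state, event) {
    case (.possible, .localRequest):
        return .outgoingRequested
    case (.incomingRequested, .localAccept):
        return .active
    case (.outgoingRequested, .remoteAccept):
        return .active
    default:
        return state // all other combinations leave the state unchanged
    }
}
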
@@ -245,49 +245,89 @@ final class CallControllerButtonsNode: ASDisplayNode {
 
             height = smallButtonSize + topBottomSpacing + largeButtonSize + max(bottomInset + 32.0, 46.0)
         case .active:
-            var topButtons: [ButtonDescription] = []
-            
-            let soundOutput: ButtonDescription.SoundOutput
-            switch speakerMode {
-            case .none, .builtin:
-                soundOutput = .builtin
-            case .speaker:
-                soundOutput = .speaker
-            case .headphones:
-                soundOutput = .builtin
-            case .bluetooth:
-                soundOutput = .bluetooth
-            }
             
             switch videoState {
-            case .active, .incomingRequested, .outgoingRequested, .possible:
+            case .active, .incomingRequested, .outgoingRequested:
                 let isCameraActive: Bool
                 if case .possible = videoState {
                     isCameraActive = false
                 } else {
                     isCameraActive = !self.isCameraPaused
                 }
+                
+                var topButtons: [ButtonDescription] = []
+                
+                let soundOutput: ButtonDescription.SoundOutput
+                switch speakerMode {
+                case .none, .builtin:
+                    soundOutput = .builtin
+                case .speaker:
+                    soundOutput = .speaker
+                case .headphones:
+                    soundOutput = .builtin
+                case .bluetooth:
+                    soundOutput = .bluetooth
+                }
+                
                 topButtons.append(.enableCamera(isCameraActive))
                 topButtons.append(.mute(isMuted))
                 topButtons.append(.switchCamera)
-            case .notAvailable:
-                topButtons.append(.mute(isMuted))
-                topButtons.append(.soundOutput(soundOutput))
+                topButtons.append(.end(.end))
+                
+                let topButtonsContentWidth = CGFloat(topButtons.count) * smallButtonSize
+                let topButtonsAvailableSpacingWidth = width - topButtonsContentWidth - minSmallButtonSideInset * 2.0
+                let topButtonsSpacing = min(maxSmallButtonSpacing, topButtonsAvailableSpacingWidth / CGFloat(topButtons.count - 1))
+                let topButtonsWidth = CGFloat(topButtons.count) * smallButtonSize + CGFloat(topButtons.count - 1) * topButtonsSpacing
+                var topButtonsLeftOffset = floor((width - topButtonsWidth) / 2.0)
+                for button in topButtons {
+                    buttons.append(PlacedButton(button: button, frame: CGRect(origin: CGPoint(x: topButtonsLeftOffset, y: 0.0), size: CGSize(width: smallButtonSize, height: smallButtonSize))))
+                    topButtonsLeftOffset += smallButtonSize + topButtonsSpacing
+                }
+                
+                height = smallButtonSize + max(bottomInset + 19.0, 46.0)
+            case .notAvailable, .possible:
+                var topButtons: [ButtonDescription] = []
+                var bottomButtons: [ButtonDescription] = []
+                
+                let soundOutput: ButtonDescription.SoundOutput
+                switch speakerMode {
+                case .none, .builtin:
+                    soundOutput = .builtin
+                case .speaker:
+                    soundOutput = .speaker
+                case .headphones:
+                    soundOutput = .bluetooth
+                case .bluetooth:
+                    soundOutput = .bluetooth
+                }
+                
+                topButtons.append(.enableCamera(false))
+                topButtons.append(.mute(self.isMuted))
+                topButtons.append(.switchCamera)
+                
+                let topButtonsContentWidth = CGFloat(topButtons.count) * smallButtonSize
+                let topButtonsAvailableSpacingWidth = width - topButtonsContentWidth - minSmallButtonSideInset * 2.0
+                let topButtonsSpacing = min(maxSmallButtonSpacing, topButtonsAvailableSpacingWidth / CGFloat(topButtons.count - 1))
+                let topButtonsWidth = CGFloat(topButtons.count) * smallButtonSize + CGFloat(topButtons.count - 1) * topButtonsSpacing
+                var topButtonsLeftOffset = floor((width - topButtonsWidth) / 2.0)
+                for button in topButtons {
+                    buttons.append(PlacedButton(button: button, frame: CGRect(origin: CGPoint(x: topButtonsLeftOffset, y: 0.0), size: CGSize(width: smallButtonSize, height: smallButtonSize))))
+                    topButtonsLeftOffset += smallButtonSize + topButtonsSpacing
+                }
+                
+                bottomButtons.append(.end(.outgoing))
+                
+                let bottomButtonsContentWidth = CGFloat(bottomButtons.count) * largeButtonSize
+                let bottomButtonsAvailableSpacingWidth = width - bottomButtonsContentWidth - minLargeButtonSideInset * 2.0
+                let bottomButtonsSpacing = min(maxLargeButtonSpacing, bottomButtonsAvailableSpacingWidth / CGFloat(bottomButtons.count - 1))
+                let bottomButtonsWidth = CGFloat(bottomButtons.count) * largeButtonSize + CGFloat(bottomButtons.count - 1) * bottomButtonsSpacing
+                var bottomButtonsLeftOffset = floor((width - bottomButtonsWidth) / 2.0)
+                for button in bottomButtons {
+                    buttons.append(PlacedButton(button: button, frame: CGRect(origin: CGPoint(x: bottomButtonsLeftOffset, y: smallButtonSize + topBottomSpacing), size: CGSize(width: largeButtonSize, height: largeButtonSize))))
+                    bottomButtonsLeftOffset += largeButtonSize + bottomButtonsSpacing
+                }
+                
+                height = smallButtonSize + topBottomSpacing + largeButtonSize + max(bottomInset + 32.0, 46.0)
             }
-            
-            topButtons.append(.end(.end))
-            
-            let topButtonsContentWidth = CGFloat(topButtons.count) * smallButtonSize
-            let topButtonsAvailableSpacingWidth = width - topButtonsContentWidth - minSmallButtonSideInset * 2.0
-            let topButtonsSpacing = min(maxSmallButtonSpacing, topButtonsAvailableSpacingWidth / CGFloat(topButtons.count - 1))
-            let topButtonsWidth = CGFloat(topButtons.count) * smallButtonSize + CGFloat(topButtons.count - 1) * topButtonsSpacing
-            var topButtonsLeftOffset = floor((width - topButtonsWidth) / 2.0)
-            for button in topButtons {
-                buttons.append(PlacedButton(button: button, frame: CGRect(origin: CGPoint(x: topButtonsLeftOffset, y: 0.0), size: CGSize(width: smallButtonSize, height: smallButtonSize))))
-                topButtonsLeftOffset += smallButtonSize + topButtonsSpacing
-            }
-            
-            height = smallButtonSize + max(bottomInset + 19.0, 46.0)
         }
         
         let delayIncrement = 0.015

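Both branches above repeat the same centering arithmetic for a row of equally sized buttons. Pulled out on its own with hypothetical names, and with an added guard for the single-button case (where the original expressions would divide by zero), the computation is:

import Foundation

// x offsets that center `count` buttons of side `buttonSize` in `width`,
// clamping the gap to `maxSpacing` and honoring `minSideInset` per side.
func centeredRowOffsets(count: Int, buttonSize: CGFloat, width: CGFloat,
                        maxSpacing: CGFloat, minSideInset: CGFloat) -> [CGFloat] {
    guard count > 0 else { return [] }
    let contentWidth = CGFloat(count) * buttonSize
    let availableSpacingWidth = width - contentWidth - minSideInset * 2.0
    let spacing = count > 1 ? min(maxSpacing, availableSpacingWidth / CGFloat(count - 1)) : 0.0
    let rowWidth = contentWidth + CGFloat(count - 1) * spacing
    var offset = floor((width - rowWidth) / 2.0)
    var offsets: [CGFloat] = []
    for _ in 0..<count {
        offsets.append(offset)
        offset += buttonSize + spacing
    }
    return offsets
}
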
@@ -156,6 +156,7 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
     private var expandedVideoNode: CallVideoNode?
     private var minimizedVideoNode: CallVideoNode?
    private var disableAnimationForExpandedVideoOnce: Bool = false
+    private var animationForExpandedVideoSnapshotView: UIView? = nil
     
     private var outgoingVideoNodeCorner: VideoNodeCorner = .bottomRight
     private let backButtonArrowNode: ASImageNode

@@ -297,7 +298,17 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
         }
         
         self.buttonsNode.accept = { [weak self] in
-            self?.acceptCall?()
+            guard let strongSelf = self, let callState = strongSelf.callState else {
+                return
+            }
+            switch callState.state {
+            case .active, .connecting, .reconnecting:
+                strongSelf.call.acceptVideo()
+            case .ringing:
+                strongSelf.acceptCall?()
+            default:
+                break
+            }
         }
         
         self.buttonsNode.toggleVideo = { [weak self] in

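The Accept button handler above no longer answers unconditionally: once a call is established, the same action confirms a pending video upgrade, while a ringing call is still answered. The routing, reduced to a free function for illustration (state names follow the diff, the function itself is hypothetical):

enum CallSessionState {
    case ringing, connecting, active, reconnecting, terminated
}

// One button, two meanings: answer a ringing call, or confirm a video
// upgrade once the call is already running.
func handleAcceptTap(state: CallSessionState,
                     acceptCall: () -> Void,
                     acceptVideo: () -> Void) {
    switch state {
    case .active, .connecting, .reconnecting:
        acceptVideo()
    case .ringing:
        acceptCall()
    case .terminated:
        break
    }
}
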
@@ -447,21 +458,26 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
         }
         
         if let incomingVideoNode = self.incomingVideoNodeValue {
-            let isActive: Bool
-            switch callState.remoteVideoState {
-            case .inactive:
-                isActive = false
-            case .active:
-                isActive = true
-            }
-            incomingVideoNode.updateIsBlurred(isBlurred: !isActive)
-            if isActive != self.videoPausedNode.alpha.isZero {
-                if isActive {
-                    self.videoPausedNode.alpha = 0.0
-                    self.videoPausedNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.3)
-                } else {
-                    self.videoPausedNode.alpha = 1.0
-                    self.videoPausedNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.3)
+            switch callState.state {
+            case .terminating, .terminated:
+                break
+            default:
+                let isActive: Bool
+                switch callState.remoteVideoState {
+                case .inactive:
+                    isActive = false
+                case .active:
+                    isActive = true
+                }
+                incomingVideoNode.updateIsBlurred(isBlurred: !isActive)
+                if isActive != self.videoPausedNode.alpha.isZero {
+                    if isActive {
+                        self.videoPausedNode.alpha = 0.0
+                        self.videoPausedNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.3)
+                    } else {
+                        self.videoPausedNode.alpha = 1.0
+                        self.videoPausedNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.3)
+                    }
                 }
             }
         }

@@ -510,13 +526,6 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
             if case .reconnecting = callState.state {
                 isReconnecting = true
             }
-            statusValue = .timer({ value in
-                if isReconnecting {
-                    return strings.Call_StatusConnecting
-                } else {
-                    return value
-                }
-            }, timestamp)
             if self.keyTextData?.0 != keyVisualHash {
                 let text = stringForEmojiHashOfData(keyVisualHash, 4)!
                 self.keyTextData = (keyVisualHash, text)

@@ -531,7 +540,26 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
                     self.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
                 }
             }
-            statusReception = reception
+            switch callState.videoState {
+            case .notAvailable, .active, .possible:
+                statusValue = .timer({ value in
+                    if isReconnecting {
+                        return strings.Call_StatusConnecting
+                    } else {
+                        return value
+                    }
+                }, timestamp)
+                statusReception = reception
+            case .incomingRequested:
+                var text: String
+                text = self.presentationData.strings.Call_IncomingVideoCall
+                if !self.statusNode.subtitle.isEmpty {
+                    text += "\n\(self.statusNode.subtitle)"
+                }
+                statusValue = .text(string: text, displayLogo: true)
+            case .outgoingRequested:
+                statusValue = .text(string: self.presentationData.strings.Call_StatusRequesting, displayLogo: false)
+            }
         }
         if self.shouldStayHiddenUntilConnection {
             switch callState.state {

@@ -836,6 +864,16 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
         }
         if let expandedVideoNode = self.expandedVideoNode, expandedVideoNode.isReady {
             if self.minimizedVideoDraggingPosition == nil {
+                if let animationForExpandedVideoSnapshotView = self.animationForExpandedVideoSnapshotView {
+                    self.containerNode.view.addSubview(animationForExpandedVideoSnapshotView)
+                    transition.updateAlpha(layer: animationForExpandedVideoSnapshotView.layer, alpha: 0.0, completion: { [weak animationForExpandedVideoSnapshotView] _ in
+                        animationForExpandedVideoSnapshotView?.removeFromSuperview()
+                    })
+                    transition.updateTransformScale(layer: animationForExpandedVideoSnapshotView.layer, scale: previewVideoFrame.width / fullscreenVideoFrame.width)
+                    
+                    transition.updatePosition(layer: animationForExpandedVideoSnapshotView.layer, position: CGPoint(x: previewVideoFrame.minX + previewVideoFrame.center.x / fullscreenVideoFrame.width * previewVideoFrame.width, y: previewVideoFrame.minY + previewVideoFrame.center.y / fullscreenVideoFrame.height * previewVideoFrame.height))
+                    self.animationForExpandedVideoSnapshotView = nil
+                }
                 minimizedVideoTransition.updateFrame(node: minimizedVideoNode, frame: previewVideoFrame)
                 minimizedVideoNode.updateLayout(size: minimizedVideoNode.frame.size, cornerRadius: interpolate(from: 14.0, to: 24.0, value: self.pictureInPictureTransitionFraction), transition: minimizedVideoTransition)
             }

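The snapshot handling above covers the moment the expanded and minimized video nodes trade places: a still image of the previous fullscreen video stays on top, is faded, scaled, and moved toward the preview frame, then discarded. A self-contained UIKit sketch of the same idea; the 0.3 second duration matches the diff, everything else is illustrative:

import UIKit

// Crossfade a snapshot of the old fullscreen video toward the preview
// frame while the real video nodes are re-laid-out underneath it.
func animateVideoSwap(snapshot: UIView, in container: UIView,
                      from fullscreenFrame: CGRect, to previewFrame: CGRect) {
    snapshot.frame = fullscreenFrame
    container.addSubview(snapshot)
    let scale = previewFrame.width / fullscreenFrame.width
    UIView.animate(withDuration: 0.3, animations: {
        snapshot.alpha = 0.0
        snapshot.transform = CGAffineTransform(scaleX: scale, y: scale)
        snapshot.center = CGPoint(x: previewFrame.midX, y: previewFrame.midY)
    }, completion: { _ in
        snapshot.removeFromSuperview()
    })
}
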
@@ -843,6 +881,7 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
                 minimizedVideoNode.frame = fullscreenVideoFrame
                 minimizedVideoNode.updateLayout(size: layout.size, cornerRadius: 0.0, transition: minimizedVideoTransition)
             }
+            self.animationForExpandedVideoSnapshotView = nil
         }
         
         let keyTextSize = self.keyButtonNode.frame.size

@@ -902,6 +941,8 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
         if let expandedVideoNode = self.expandedVideoNode, let minimizedVideoNode = self.minimizedVideoNode {
             let point = recognizer.location(in: recognizer.view)
             if minimizedVideoNode.frame.contains(point) {
+                let copyView = minimizedVideoNode.view.snapshotView(afterScreenUpdates: false)
+                copyView?.frame = minimizedVideoNode.frame
                 self.expandedVideoNode = minimizedVideoNode
                 self.minimizedVideoNode = expandedVideoNode
                 if let supernode = expandedVideoNode.supernode {

@@ -909,6 +950,7 @@ final class CallControllerNode: ViewControllerTracingNode, CallControllerNodePro
                 }
                 if let (layout, navigationBarHeight) = self.validLayout {
                     self.disableAnimationForExpandedVideoOnce = true
+                    self.animationForExpandedVideoSnapshotView = copyView
                     self.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .animated(duration: 0.3, curve: .easeInOut))
                 }
             } else {

@@ -189,6 +189,7 @@ public final class PresentationCallImpl: PresentationCall {
     
     private var callWasActive = false
     private var shouldPresentCallRating = false
+    private var videoWasActive = false
     
     private var sessionStateDisposable: Disposable?
     

@@ -269,7 +270,7 @@ public final class PresentationCallImpl: PresentationCall {
             self.videoCapturer = OngoingCallVideoCapturer()
             self.statePromise.set(PresentationCallState(state: isOutgoing ? .waiting : .ringing, videoState: .outgoingRequested, remoteVideoState: .inactive))
         } else {
-            self.statePromise.set(PresentationCallState(state: isOutgoing ? .waiting : .ringing, videoState: .notAvailable, remoteVideoState: .inactive))
+            self.statePromise.set(PresentationCallState(state: isOutgoing ? .waiting : .ringing, videoState: self.isVideoPossible ? .possible : .notAvailable, remoteVideoState: .inactive))
         }
         
         self.serializedData = serializedData

@@ -446,6 +447,7 @@ public final class PresentationCallImpl: PresentationCall {
                 mappedVideoState = .incomingRequested
             case .active:
                 mappedVideoState = .active
+                self.videoWasActive = true
             }
             switch callContextState.remoteVideoState {
             case .inactive:

@@ -461,7 +463,11 @@ public final class PresentationCallImpl: PresentationCall {
             } else {
                 mappedVideoState = .notAvailable
             }
-            mappedRemoteVideoState = .inactive
+            if videoWasActive {
+                mappedRemoteVideoState = .active
+            } else {
+                mappedRemoteVideoState = .inactive
+            }
         }
         
         switch sessionState.state {

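The new videoWasActive flag makes the fallback sticky: in the branch with no call-context state to map, the remote video state is now reported as active if video had already been active earlier in the call, instead of always snapping back to inactive. The mapping in isolation (hypothetical function, state names from the diff):

enum RemoteVideoState {
    case inactive, active
}

// With no fresh context state, fall back to call history instead of
// unconditionally reporting .inactive, as the old code did.
func mapRemoteVideoState(contextState: RemoteVideoState?,
                         videoWasActive: Bool) -> RemoteVideoState {
    if let contextState = contextState {
        return contextState
    }
    return videoWasActive ? .active : .inactive
}
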
@@ -739,10 +745,22 @@ public final class PresentationCallImpl: PresentationCall {
         if self.videoCapturer == nil {
             let videoCapturer = OngoingCallVideoCapturer()
             self.videoCapturer = videoCapturer
+        }
+        if let videoCapturer = self.videoCapturer {
             self.ongoingContext?.requestVideo(videoCapturer)
         }
     }
     
+    public func acceptVideo() {
+        if self.videoCapturer == nil {
+            let videoCapturer = OngoingCallVideoCapturer()
+            self.videoCapturer = videoCapturer
+        }
+        if let videoCapturer = self.videoCapturer {
+            self.ongoingContext?.acceptVideo(videoCapturer)
+        }
+    }
+    
     public func setOutgoingVideoIsPaused(_ isPaused: Bool) {
         self.videoCapturer?.setIsVideoEnabled(!isPaused)
     }

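requestVideo() and acceptVideo() now share the same create-on-first-use handling of the capturer. The pattern generalizes; in this sketch the generic holder is hypothetical, and OngoingCallVideoCapturer from the diff would be the wrapped resource:

// Create the resource lazily on first use, then keep handing out the
// same instance, as both requestVideo() and acceptVideo() do above.
final class LazyResource<Resource> {
    private let make: () -> Resource
    private var resource: Resource?

    init(_ make: @escaping () -> Resource) {
        self.make = make
    }

    func with(_ body: (Resource) -> Void) {
        if resource == nil {
            resource = make()
        }
        if let resource = resource {
            body(resource)
        }
    }
}
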
@@ -784,6 +802,11 @@ public final class PresentationCallImpl: PresentationCall {
     }
     
     public func makeOutgoingVideoView(completion: @escaping (PresentationCallVideoView?) -> Void) {
+        if self.videoCapturer == nil {
+            let videoCapturer = OngoingCallVideoCapturer()
+            self.videoCapturer = videoCapturer
+        }
+        
         self.videoCapturer?.makeOutgoingVideoView(completion: { view in
             if let view = view {
                 completion(PresentationCallVideoView(

@@ -246,6 +246,7 @@ private protocol OngoingCallThreadLocalContextProtocol: class {
     func nativeSetNetworkType(_ type: NetworkType)
     func nativeSetIsMuted(_ value: Bool)
     func nativeRequestVideo(_ capturer: OngoingCallVideoCapturer)
+    func nativeAcceptVideo(_ capturer: OngoingCallVideoCapturer)
     func nativeStop(_ completion: @escaping (String?, Int64, Int64, Int64, Int64) -> Void)
     func nativeDebugInfo() -> String
     func nativeVersion() -> String

@@ -276,6 +277,9 @@ extension OngoingCallThreadLocalContext: OngoingCallThreadLocalContextProtocol {
     func nativeRequestVideo(_ capturer: OngoingCallVideoCapturer) {
     }
     
+    func nativeAcceptVideo(_ capturer: OngoingCallVideoCapturer) {
+    }
+    
     func nativeSwitchVideoCamera() {
     }
     

@@ -329,6 +333,10 @@ extension OngoingCallThreadLocalContextWebrtc: OngoingCallThreadLocalContextProt
         self.requestVideo(capturer.impl)
     }
     
+    func nativeAcceptVideo(_ capturer: OngoingCallVideoCapturer) {
+        self.acceptVideo(capturer.impl)
+    }
+    
     func nativeDebugInfo() -> String {
         return self.debugInfo() ?? ""
     }

@@ -646,6 +654,12 @@ public final class OngoingCallContext {
         }
     }
     
+    public func acceptVideo(_ capturer: OngoingCallVideoCapturer) {
+        self.withContext { context in
+            context.nativeAcceptVideo(capturer)
+        }
+    }
+    
     public func debugInfo() -> Signal<(String, String), NoError> {
         let poll = Signal<(String, String), NoError> { subscriber in
             self.withContext { context in

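The new OngoingCallContext.acceptVideo follows the same withContext shape as requestVideo: every touch of the native context goes through one funnel. The diff does not show how withContext schedules its body, so the serial queue below is purely an assumption; it is one common way such a funnel keeps nativeRequestVideo and nativeAcceptVideo from racing:

import Dispatch

// Queue-confined wrapper (the scheduling is assumed, see note above).
final class ConfinedContext<Context> {
    private let queue = DispatchQueue(label: "call.context")
    private var context: Context

    init(_ context: Context) {
        self.context = context
    }

    func withContext(_ body: @escaping (Context) -> Void) {
        queue.async {
            body(self.context)
        }
    }
}
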
@@ -171,6 +171,14 @@ void Manager::receiveSignalingData(const std::vector<uint8_t> &data) {
             _stateUpdated(_state, _videoState);
         }
     } else if (mode == 2) {
+        if (_videoState == VideoState::outgoingRequested) {
+            _videoState = VideoState::active;
+            _stateUpdated(_state, _videoState);
+            
+            _mediaManager->perform([videoCapture = _videoCapture](MediaManager *mediaManager) {
+                mediaManager->setSendVideo(videoCapture);
+            });
+        }
     } else if (mode == 3) {
         auto candidatesData = buffer.Slice(1, buffer.size() - 1);
         _networkManager->perform([candidatesData](NetworkManager *networkManager) {

@@ -186,6 +194,7 @@ void Manager::receiveSignalingData(const std::vector<uint8_t> &data) {
 
 void Manager::requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) {
     if (videoCapture != nullptr) {
+        _videoCapture = videoCapture;
         if (_videoState == VideoState::possible) {
             _videoState = VideoState::outgoingRequested;
             

@@ -198,16 +207,35 @@ void Manager::requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCap
             memcpy(data.data(), buffer.data(), buffer.size());
             
             _signalingDataEmitted(data);
-            
-            /*_mediaManager->perform([](MediaManager *mediaManager) {
-                mediaManager->setSendVideo(true);
-            });*/
             
             _stateUpdated(_state, _videoState);
         }
     }
 }
 
+void Manager::acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) {
+    if (videoCapture != nullptr) {
+        _videoCapture = videoCapture;
+        if (_videoState == VideoState::incomingRequested) {
+            _videoState = VideoState::active;
+            
+            rtc::CopyOnWriteBuffer buffer;
+            uint8_t mode = 2;
+            buffer.AppendData(&mode, 1);
+            
+            std::vector<uint8_t> data;
+            data.resize(buffer.size());
+            memcpy(data.data(), buffer.data(), buffer.size());
+            
+            _signalingDataEmitted(data);
+            _stateUpdated(_state, _videoState);
+            
+            _mediaManager->perform([videoCapture](MediaManager *mediaManager) {
+                mediaManager->setSendVideo(videoCapture);
+            });
+        }
+    }
+}
+
 void Manager::setMuteOutgoingAudio(bool mute) {
     _mediaManager->perform([mute](MediaManager *mediaManager) {
         mediaManager->setMuteOutgoingAudio(mute);

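On the native side the upgrade handshake rides on the existing signaling channel behind a one-byte mode prefix: these hunks show mode 2 announcing that video was accepted and mode 3 carrying candidate data. The values used by other messages (for example the buffer emitted by requestVideo) are not visible here, so the sketch below treats them as unknown:

import Foundation

enum SignalingMessage: Equatable {
    case videoAccepted                        // mode == 2, no payload
    case candidates(Data)                     // mode == 3, rest of buffer
    case unknown(mode: UInt8, payload: Data)  // not shown in this diff
}

func encodeVideoAccepted() -> Data {
    return Data([2]) // mirrors: uint8_t mode = 2; buffer.AppendData(&mode, 1);
}

func decodeSignaling(_ data: Data) -> SignalingMessage? {
    guard let mode = data.first else { return nil }
    let payload = Data(data.dropFirst())
    switch mode {
    case 2:
        return .videoAccepted
    case 3:
        return .candidates(payload)
    default:
        return .unknown(mode: mode, payload: payload)
    }
}
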
@@ -36,6 +36,7 @@ public:
     void start();
     void receiveSignalingData(const std::vector<uint8_t> &data);
     void requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture);
+    void acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture);
     void setMuteOutgoingAudio(bool mute);
     void notifyIsLocalVideoActive(bool isActive);
     void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

@@ -287,9 +287,7 @@ _videoCapture(videoCapture) {
     _videoChannel->SetInterface(_videoNetworkInterface.get(), webrtc::MediaTransportConfig());
     
     if (_videoCapture != nullptr) {
-        ((TgVoipVideoCaptureInterfaceImpl *)_videoCapture.get())->_impl->getSyncAssumingSameThread()->setIsActiveUpdated(this->_localVideoCaptureActiveUpdated);
-        
-        setSendVideo(true);
+        setSendVideo(_videoCapture);
     }
 }
 

@@ -310,7 +308,7 @@ MediaManager::~MediaManager() {
     
     _audioChannel->SetInterface(nullptr, webrtc::MediaTransportConfig());
     
-    setSendVideo(false);
+    setSendVideo(nullptr);
 }
 
 void MediaManager::setIsConnected(bool isConnected) {

@@ -360,11 +358,16 @@ void MediaManager::notifyPacketSent(const rtc::SentPacket &sentPacket) {
         _call->OnSentPacket(sentPacket);
     }
 }
 
-void MediaManager::setSendVideo(bool sendVideo) {
-    if (_isSendingVideo == sendVideo) {
+void MediaManager::setSendVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) {
+    if (_isSendingVideo == (videoCapture != nullptr)) {
         return;
     }
-    _isSendingVideo = sendVideo;
+    _isSendingVideo = videoCapture != nullptr;
+    _videoCapture = videoCapture;
     
+    if (_videoCapture != nullptr) {
+        ((TgVoipVideoCaptureInterfaceImpl *)_videoCapture.get())->_impl->getSyncAssumingSameThread()->setIsActiveUpdated(this->_localVideoCaptureActiveUpdated);
+    }
+    
     if (_isSendingVideo) {
         auto videoCodec = selectVideoCodec(_videoCodecs);

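The setSendVideo signature change replaces a bare bool with the capture interface itself, so "turn video on" and "which capturer to use" can no longer disagree: nullptr stops sending, a non-null capturer both enables sending and supplies the source. The same idea transliterated to Swift with hypothetical types:

// nil = stop sending; non-nil = start sending from this capturer.
final class VideoSender<Capturer> {
    private(set) var capturer: Capturer?
    var isSendingVideo: Bool { return capturer != nil }

    func setSendVideo(_ newCapturer: Capturer?) {
        // No-op when the on/off state would not change, like the
        // _isSendingVideo early return in the C++ version above.
        if isSendingVideo == (newCapturer != nil) {
            return
        }
        capturer = newCapturer
        // Codec selection and send-stream setup would follow here.
    }
}
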
@@ -70,7 +70,7 @@ public:
     void setIsConnected(bool isConnected);
     void receivePacket(const rtc::CopyOnWriteBuffer &packet);
     void notifyPacketSent(const rtc::SentPacket &sentPacket);
-    void setSendVideo(bool sendVideo);
+    void setSendVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture);
     void setMuteOutgoingAudio(bool mute);
     void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
     

@@ -189,6 +189,7 @@ public:
     
     virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
     virtual void requestVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) = 0;
+    virtual void acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) = 0;
     
     virtual TgVoipFinalState stop() = 0;
 };

@@ -223,6 +223,12 @@ public:
             manager->requestVideo(videoCapture);
         });
     }
     
+    virtual void acceptVideo(std::shared_ptr<TgVoipVideoCaptureInterface> videoCapture) override {
+        _manager->perform([videoCapture](Manager *manager) {
+            manager->acceptVideo(videoCapture);
+        });
+    }
+    
     void setNetworkType(TgVoipNetworkType networkType) override {
         /*message::NetworkType mappedType;

@@ -119,6 +119,7 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
 - (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType;
 - (void)makeIncomingVideoView:(void (^_Nonnull)(OngoingCallThreadLocalContextWebrtcVideoView * _Nullable))completion;
 - (void)requestVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer;
+- (void)acceptVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer;
 - (void)addSignalingData:(NSData * _Nonnull)data;
 
 @end

@@ -491,6 +491,13 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     }
 }
 
+- (void)acceptVideo:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer {
+    if (_tgVoip && _videoCapturer == nil) {
+        _videoCapturer = videoCapturer;
+        _tgVoip->acceptVideo([_videoCapturer getInterface]);
+    }
+}
+
 @end
 
 @implementation OngoingCallThreadLocalContextWebrtcVideoView : UIView