mirror of https://github.com/Swiftgram/Telegram-iOS.git
synced 2025-06-16 05:55:20 +00:00

Experimental media improvements
parent 8f87fe8f54
commit 9195ffa96b
@@ -11,15 +11,31 @@ public enum RequestCallResult {
    case alreadyInProgress(PeerId)
}

public enum PresentationCallState: Equatable {
    case waiting
    case ringing
    case requesting(Bool)
    case connecting(Data?)
    case active(Double, Int32?, Data)
    case reconnecting(Double, Int32?, Data)
    case terminating
    case terminated(CallId?, CallSessionTerminationReason?, Bool)

public struct PresentationCallState: Equatable {
    public enum State: Equatable {
        case waiting
        case ringing
        case requesting(Bool)
        case connecting(Data?)
        case active(Double, Int32?, Data)
        case reconnecting(Double, Int32?, Data)
        case terminating
        case terminated(CallId?, CallSessionTerminationReason?, Bool)
    }

    public enum VideoState: Equatable {
        case notAvailable
        case available(Bool)
        case active
    }

    public var state: State
    public var videoState: VideoState

    public init(state: State, videoState: VideoState) {
        self.state = state
        self.videoState = videoState
    }
}

public protocol PresentationCall: class {

@@ -44,6 +60,8 @@ public protocol PresentationCall: class {

    func toggleIsMuted()
    func setIsMuted(_ value: Bool)
    func setEnableVideo(_ value: Bool)
    func switchVideoCamera()
    func setCurrentAudioOutput(_ output: AudioSessionOutput)
    func debugInfo() -> Signal<(String, String), NoError>
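
Note: PresentationCallState changes from a flat enum into a struct that pairs the connection State with a VideoState, so every observer receives both together. A minimal consumer sketch (not part of this commit; assumes SwiftSignalKit's deliverOnMainQueue and any PresentationCall instance):

    let stateDisposable = (call.state
    |> deliverOnMainQueue).start(next: { callState in
        // Connection phase and video availability now arrive as one value.
        switch callState.state {
        case let .active(timestamp, reception, _):
            print("active since \(timestamp), reception: \(String(describing: reception))")
        default:
            break
        }
        switch callState.videoState {
        case .notAvailable:
            break // hide the video toggle entirely
        case let .available(isEnabled):
            print("video can be enabled: \(isEnabled)")
        case .active:
            print("video is running")
        }
    })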
@@ -178,6 +178,10 @@ public final class CallController: ViewController {
            let _ = self?.call.hangUp()
        }

        self.controllerNode.toggleVideo = { [weak self] in
            let _ = self?.call.setEnableVideo(true)
        }

        self.controllerNode.back = { [weak self] in
            let _ = self?.dismiss()
        }
@@ -11,6 +11,7 @@ enum CallControllerButtonType {
    case accept
    case speaker
    case bluetooth
    case video
}

private let buttonSize = CGSize(width: 75.0, height: 75.0)

@@ -123,6 +124,11 @@ final class CallControllerButtonNode: HighlightTrackingButtonNode {
            regularImage = generateEmptyButtonImage(icon: UIImage(bundleImageName: "Call/CallBluetoothButton"), strokeColor: emptyStroke, fillColor: .clear)
            highlightedImage = generateEmptyButtonImage(icon: UIImage(bundleImageName: "Call/CallBluetoothButton"), strokeColor: emptyStroke, fillColor: emptyHighlightedFill)
            filledImage = generateEmptyButtonImage(icon: UIImage(bundleImageName: "Call/CallBluetoothButton"), strokeColor: nil, fillColor: invertedFill, knockout: true)
        case .video:
            let patternImage = generateTintedImage(image: UIImage(bundleImageName: "Chat/Input/Text/IconVideo"), color: .white)
            regularImage = generateEmptyButtonImage(icon: patternImage, strokeColor: emptyStroke, fillColor: .clear)
            highlightedImage = generateEmptyButtonImage(icon: patternImage, strokeColor: emptyStroke, fillColor: emptyHighlightedFill)
            filledImage = generateEmptyButtonImage(icon: patternImage, strokeColor: nil, fillColor: invertedFill, knockout: true)
        }

        self.regularImage = regularImage

@@ -209,6 +215,11 @@ final class CallControllerButtonNode: HighlightTrackingButtonNode {
            regularImage = generateEmptyButtonImage(icon: UIImage(bundleImageName: "Call/CallBluetoothButton"), strokeColor: emptyStroke, fillColor: .clear)
            highlightedImage = generateEmptyButtonImage(icon: UIImage(bundleImageName: "Call/CallBluetoothButton"), strokeColor: emptyStroke, fillColor: emptyHighlightedFill)
            filledImage = generateEmptyButtonImage(icon: UIImage(bundleImageName: "Call/CallBluetoothButton"), strokeColor: nil, fillColor: invertedFill, knockout: true)
        case .video:
            let patternImage = generateTintedImage(image: UIImage(bundleImageName: "Chat/Input/Text/IconVideo"), color: .white)
            regularImage = generateEmptyButtonImage(icon: patternImage, strokeColor: emptyStroke, fillColor: .clear)
            highlightedImage = generateEmptyButtonImage(icon: patternImage, strokeColor: emptyStroke, fillColor: emptyHighlightedFill)
            filledImage = generateEmptyButtonImage(icon: patternImage, strokeColor: nil, fillColor: invertedFill, knockout: true)
        }

        self.regularImage = regularImage
@@ -15,7 +15,13 @@ enum CallControllerButtonsSpeakerMode {
}

enum CallControllerButtonsMode: Equatable {
    case active(CallControllerButtonsSpeakerMode)
    enum VideoState: Equatable {
        case notAvailable
        case available(Bool)
        case active
    }

    case active(speakerMode: CallControllerButtonsSpeakerMode, videoState: VideoState)
    case incoming
}

@@ -27,6 +33,8 @@ final class CallControllerButtonsNode: ASDisplayNode {
    private let endButton: CallControllerButtonNode
    private let speakerButton: CallControllerButtonNode

    private let videoButton: CallControllerButtonNode

    private var mode: CallControllerButtonsMode?

    private var validLayout: CGFloat?

@@ -41,6 +49,7 @@ final class CallControllerButtonsNode: ASDisplayNode {
    var mute: (() -> Void)?
    var end: (() -> Void)?
    var speaker: (() -> Void)?
    var toggleVideo: (() -> Void)?

    init(strings: PresentationStrings) {
        self.acceptButton = CallControllerButtonNode(type: .accept, label: strings.Call_Accept)

@@ -55,6 +64,9 @@ final class CallControllerButtonsNode: ASDisplayNode {
        self.speakerButton = CallControllerButtonNode(type: .speaker, label: nil)
        self.speakerButton.alpha = 0.0

        self.videoButton = CallControllerButtonNode(type: .video, label: nil)
        self.videoButton.alpha = 0.0

        super.init()

        self.addSubnode(self.acceptButton)

@@ -62,12 +74,14 @@ final class CallControllerButtonsNode: ASDisplayNode {
        self.addSubnode(self.muteButton)
        self.addSubnode(self.endButton)
        self.addSubnode(self.speakerButton)
        self.addSubnode(self.videoButton)

        self.acceptButton.addTarget(self, action: #selector(self.buttonPressed(_:)), forControlEvents: .touchUpInside)
        self.declineButton.addTarget(self, action: #selector(self.buttonPressed(_:)), forControlEvents: .touchUpInside)
        self.muteButton.addTarget(self, action: #selector(self.buttonPressed(_:)), forControlEvents: .touchUpInside)
        self.endButton.addTarget(self, action: #selector(self.buttonPressed(_:)), forControlEvents: .touchUpInside)
        self.speakerButton.addTarget(self, action: #selector(self.buttonPressed(_:)), forControlEvents: .touchUpInside)
        self.videoButton.addTarget(self, action: #selector(self.buttonPressed(_:)), forControlEvents: .touchUpInside)
    }

    func updateLayout(constrainedWidth: CGFloat, transition: ContainedViewLayoutTransition) {

@@ -107,6 +121,11 @@ final class CallControllerButtonsNode: ASDisplayNode {
        var origin = CGPoint(x: floor((width - threeButtonsWidth) / 2.0), y: 0.0)
        for button in [self.muteButton, self.endButton, self.speakerButton] {
            transition.updateFrame(node: button, frame: CGRect(origin: origin, size: buttonSize))

            if button === self.endButton {
                transition.updateFrame(node: self.videoButton, frame: CGRect(origin: CGPoint(x: origin.x, y: origin.y - buttonSize.height - 20.0), size: buttonSize))
            }

            origin.x += buttonSize.width + threeButtonSpacing
        }

@@ -121,10 +140,10 @@ final class CallControllerButtonsNode: ASDisplayNode {
            for button in [self.declineButton, self.acceptButton] {
                button.alpha = 1.0
            }
            for button in [self.muteButton, self.endButton, self.speakerButton] {
            for button in [self.muteButton, self.endButton, self.speakerButton, self.videoButton] {
                button.alpha = 0.0
            }
        case let .active(speakerMode):
        case let .active(speakerMode, videoState):
            for button in [self.muteButton, self.speakerButton] {
                if animated && button.alpha.isZero {
                    button.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.3)

@@ -152,6 +171,23 @@ final class CallControllerButtonsNode: ASDisplayNode {
                self.endButton.alpha = 1.0
            }

            switch videoState {
            case .notAvailable:
                self.videoButton.alpha = 0.0
            case let .available(isEnabled):
                self.videoButton.isUserInteractionEnabled = isEnabled
                if animated {
                    self.videoButton.alpha = isEnabled ? 1.0 : 0.5
                    self.videoButton.layer.animateAlpha(from: 0.0, to: self.videoButton.alpha, duration: 0.2)
                } else {
                    self.videoButton.alpha = isEnabled ? 1.0 : 0.5
                }
            case .active:
                self.videoButton.isUserInteractionEnabled = true
                self.videoButton.alpha = 0.0
            }

            if !self.declineButton.alpha.isZero {
                if animated {
                    self.declineButton.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.2)

@@ -187,6 +223,26 @@ final class CallControllerButtonsNode: ASDisplayNode {
            self.speaker?()
        } else if button === self.acceptButton {
            self.accept?()
        } else if button === self.videoButton {
            self.toggleVideo?()
        }
    }

    override func hitTest(_ point: CGPoint, with event: UIEvent?) -> UIView? {
        let buttons = [
            self.acceptButton,
            self.declineButton,
            self.muteButton,
            self.endButton,
            self.speakerButton,
            self.videoButton
        ]
        for button in buttons {
            if let result = button.view.hitTest(self.view.convert(point, to: button.view), with: event) {
                return result
            }
        }

        return super.hitTest(point, with: event)
    }
}
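
Note: CallControllerButtonsMode.active now carries a videoState next to the speaker mode; updateButtons drives the video button from it (hidden when not available, dimmed to 0.5 alpha when available but disabled, hidden again once video is active and the full-screen video UI takes over). A usage sketch (not from the commit; the speaker-mode case names are assumed from the surrounding code):

    buttonsNode.updateMode(.active(speakerMode: .none, videoState: .available(true)))
    // Once the call actually streams video:
    buttonsNode.updateMode(.active(speakerMode: .speaker, videoState: .active))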
@@ -14,6 +14,49 @@ import LocalizedPeerData
import PhotoResources
import CallsEmoji

private final class IncomingVideoNode: ASDisplayNode {
    private let videoView: UIView

    init(videoView: UIView) {
        self.videoView = videoView

        super.init()

        self.view.addSubview(self.videoView)
    }

    func updateLayout(size: CGSize) {
        self.videoView.frame = CGRect(origin: CGPoint(), size: size)
    }
}

private final class OutgoingVideoNode: ASDisplayNode {
    private let videoView: UIView
    private let switchCameraButton: HighlightableButtonNode
    private let switchCamera: () -> Void

    init(videoView: UIView, switchCamera: @escaping () -> Void) {
        self.videoView = videoView
        self.switchCameraButton = HighlightableButtonNode()
        self.switchCamera = switchCamera

        super.init()

        self.view.addSubview(self.videoView)
        self.addSubnode(self.switchCameraButton)
        self.switchCameraButton.addTarget(self, action: #selector(self.buttonPressed), forControlEvents: .touchUpInside)
    }

    @objc private func buttonPressed() {
        self.switchCamera()
    }

    func updateLayout(size: CGSize) {
        self.videoView.frame = CGRect(origin: CGPoint(), size: size)
        self.switchCameraButton.frame = CGRect(origin: CGPoint(), size: size)
    }
}

final class CallControllerNode: ASDisplayNode {
    private let sharedContext: SharedAccountContext
    private let account: Account

@@ -31,8 +74,8 @@ final class CallControllerNode: ASDisplayNode {

    private let imageNode: TransformImageNode
    private let dimNode: ASDisplayNode
    private var incomingVideoView: UIView?
    private var outgoingVideoView: UIView?
    private var incomingVideoNode: IncomingVideoNode?
    private var outgoingVideoNode: OutgoingVideoNode?
    private var videoViewsRequested: Bool = false
    private let backButtonArrowNode: ASImageNode
    private let backButtonNode: HighlightableButtonNode

@@ -63,6 +106,7 @@ final class CallControllerNode: ASDisplayNode {
    var beginAudioOuputSelection: (() -> Void)?
    var acceptCall: (() -> Void)?
    var endCall: (() -> Void)?
    var toggleVideo: (() -> Void)?
    var back: (() -> Void)?
    var presentCallRating: ((CallId) -> Void)?
    var callEnded: ((Bool) -> Void)?

@@ -151,6 +195,10 @@ final class CallControllerNode: ASDisplayNode {
            self?.acceptCall?()
        }

        self.buttonsNode.toggleVideo = { [weak self] in
            self?.toggleVideo?()
        }

        self.keyButtonNode.addTarget(self, action: #selector(self.keyPressed), forControlEvents: .touchUpInside)

        self.backButtonNode.addTarget(self, action: #selector(self.backPressed), forControlEvents: .touchUpInside)

@@ -205,7 +253,58 @@ final class CallControllerNode: ASDisplayNode {

        let statusValue: CallControllerStatusValue
        var statusReception: Int32?
        switch callState {

        switch callState.videoState {
        case .active:
            if !self.videoViewsRequested {
                self.videoViewsRequested = true
                self.call.makeIncomingVideoView(completion: { [weak self] incomingVideoView in
                    guard let strongSelf = self else {
                        return
                    }
                    if let incomingVideoView = incomingVideoView {
                        strongSelf.setCurrentAudioOutput?(.speaker)
                        let incomingVideoNode = IncomingVideoNode(videoView: incomingVideoView)
                        strongSelf.incomingVideoNode = incomingVideoNode
                        strongSelf.containerNode.insertSubnode(incomingVideoNode, aboveSubnode: strongSelf.dimNode)
                        if let (layout, navigationBarHeight) = strongSelf.validLayout {
                            strongSelf.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
                        }
                    }
                })

                self.call.makeOutgoingVideoView(completion: { [weak self] outgoingVideoView in
                    guard let strongSelf = self else {
                        return
                    }
                    if let outgoingVideoView = outgoingVideoView {
                        outgoingVideoView.backgroundColor = .black
                        outgoingVideoView.clipsToBounds = true
                        outgoingVideoView.layer.cornerRadius = 16.0
                        strongSelf.setCurrentAudioOutput?(.speaker)
                        let outgoingVideoNode = OutgoingVideoNode(videoView: outgoingVideoView, switchCamera: {
                            guard let strongSelf = self else {
                                return
                            }
                            strongSelf.call.switchVideoCamera()
                        })
                        strongSelf.outgoingVideoNode = outgoingVideoNode
                        if let incomingVideoNode = strongSelf.incomingVideoNode {
                            strongSelf.containerNode.insertSubnode(outgoingVideoNode, aboveSubnode: incomingVideoNode)
                        } else {
                            strongSelf.containerNode.insertSubnode(outgoingVideoNode, aboveSubnode: strongSelf.dimNode)
                        }
                        if let (layout, navigationBarHeight) = strongSelf.validLayout {
                            strongSelf.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
                        }
                    }
                })
            }
        default:
            break
        }

        switch callState.state {
        case .waiting, .connecting:
            statusValue = .text(self.presentationData.strings.Call_StatusConnecting)
        case let .requesting(ringing):

@@ -241,7 +340,7 @@ final class CallControllerNode: ASDisplayNode {
        case .active(let timestamp, let reception, let keyVisualHash), .reconnecting(let timestamp, let reception, let keyVisualHash):
            let strings = self.presentationData.strings
            var isReconnecting = false
            if case .reconnecting = callState {
            if case .reconnecting = callState.state {
                isReconnecting = true
            }
            statusValue = .timer({ value in

@@ -266,45 +365,8 @@ final class CallControllerNode: ASDisplayNode {
                }
            }
            statusReception = reception
            if !self.videoViewsRequested {
                self.videoViewsRequested = true
                self.call.makeIncomingVideoView(completion: { [weak self] incomingVideoView in
                    guard let strongSelf = self else {
                        return
                    }
                    if let incomingVideoView = incomingVideoView {
                        strongSelf.setCurrentAudioOutput?(.speaker)
                        strongSelf.incomingVideoView = incomingVideoView
                        strongSelf.containerNode.view.insertSubview(incomingVideoView, aboveSubview: strongSelf.dimNode.view)
                        if let (layout, navigationBarHeight) = strongSelf.validLayout {
                            strongSelf.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
                        }
                    }
                })

                self.call.makeOutgoingVideoView(completion: { [weak self] outgoingVideoView in
                    guard let strongSelf = self else {
                        return
                    }
                    if let outgoingVideoView = outgoingVideoView {
                        outgoingVideoView.backgroundColor = .black
                        outgoingVideoView.clipsToBounds = true
                        outgoingVideoView.layer.cornerRadius = 16.0
                        strongSelf.setCurrentAudioOutput?(.speaker)
                        strongSelf.outgoingVideoView = outgoingVideoView
                        if let incomingVideoView = strongSelf.incomingVideoView {
                            strongSelf.containerNode.view.insertSubview(outgoingVideoView, aboveSubview: incomingVideoView)
                        } else {
                            strongSelf.containerNode.view.insertSubview(outgoingVideoView, aboveSubview: strongSelf.dimNode.view)
                        }
                        if let (layout, navigationBarHeight) = strongSelf.validLayout {
                            strongSelf.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
                        }
                    }
                })
            }
        }
        switch callState {
        switch callState.state {
        case .terminated, .terminating:
            if !self.statusNode.alpha.isEqual(to: 0.5) {
                self.statusNode.alpha = 0.5

@@ -327,7 +389,7 @@ final class CallControllerNode: ASDisplayNode {
            }
        }
        if self.shouldStayHiddenUntilConnection {
            switch callState {
            switch callState.state {
            case .connecting, .active:
                self.containerNode.alpha = 1.0
            default:

@@ -339,7 +401,7 @@ final class CallControllerNode: ASDisplayNode {

        self.updateButtonsMode()

        if case let .terminated(id, _, reportRating) = callState, let callId = id {
        if case let .terminated(id, _, reportRating) = callState.state, let callId = id {
            let presentRating = reportRating || self.forceReportRating
            if presentRating {
                self.presentCallRating?(callId)

@@ -353,7 +415,7 @@ final class CallControllerNode: ASDisplayNode {
            return
        }

        switch callState {
        switch callState.state {
        case .ringing:
            self.buttonsNode.updateMode(.incoming)
        default:

@@ -373,7 +435,16 @@ final class CallControllerNode: ASDisplayNode {
                mode = .none
            }
        }
        self.buttonsNode.updateMode(.active(mode))
        let mappedVideoState: CallControllerButtonsMode.VideoState
        switch callState.videoState {
        case .notAvailable:
            mappedVideoState = .notAvailable
        case .available:
            mappedVideoState = .available(true)
        case .active:
            mappedVideoState = .active
        }
        self.buttonsNode.updateMode(.active(speakerMode: mode, videoState: mappedVideoState))
    }
}

@@ -466,12 +537,15 @@ final class CallControllerNode: ASDisplayNode {
        let buttonsOriginY: CGFloat = layout.size.height - (buttonsOffset - 40.0) - buttonsHeight - layout.intrinsicInsets.bottom
        transition.updateFrame(node: self.buttonsNode, frame: CGRect(origin: CGPoint(x: 0.0, y: buttonsOriginY), size: CGSize(width: layout.size.width, height: buttonsHeight)))

        if let incomingVideoView = self.incomingVideoView {
            incomingVideoView.frame = CGRect(origin: CGPoint(), size: layout.size)
        if let incomingVideoNode = self.incomingVideoNode {
            incomingVideoNode.frame = CGRect(origin: CGPoint(), size: layout.size)
            incomingVideoNode.updateLayout(size: layout.size)
        }
        if let outgoingVideoView = self.outgoingVideoView {
        if let outgoingVideoNode = self.outgoingVideoNode {
            let outgoingSize = layout.size.aspectFitted(CGSize(width: 200.0, height: 200.0))
            outgoingVideoView.frame = CGRect(origin: CGPoint(x: layout.size.width - 16.0 - outgoingSize.width, y: buttonsOriginY - 32.0 - outgoingSize.height), size: outgoingSize)
            let outgoingFrame = CGRect(origin: CGPoint(x: layout.size.width - 16.0 - outgoingSize.width, y: buttonsOriginY - 32.0 - outgoingSize.height), size: outgoingSize)
            outgoingVideoNode.frame = outgoingFrame
            outgoingVideoNode.updateLayout(size: outgoingFrame.size)
        }

        let keyTextSize = self.keyButtonNode.frame.size
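
Note: the outgoing preview frame is pure arithmetic: aspectFitted scales the screen size into a 200×200 box, and the result is pinned 16 pt from the trailing edge and 32 pt above the buttons row. A worked example with illustrative numbers (aspectFitted is the repo's CGSize helper):

    let layoutSize = CGSize(width: 375.0, height: 812.0)
    let buttonsOriginY: CGFloat = 660.0

    // scale = min(200/375, 200/812) ≈ 0.246, so 375×812 fits as ≈ 92×200.
    let outgoingSize = layoutSize.aspectFitted(CGSize(width: 200.0, height: 200.0))

    let outgoingFrame = CGRect(
        origin: CGPoint(
            x: layoutSize.width - 16.0 - outgoingSize.width,  // right-aligned, 16 pt inset
            y: buttonsOriginY - 32.0 - outgoingSize.height),  // 32 pt above the buttons
        size: outgoingSize)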
@@ -188,7 +188,7 @@ public final class PresentationCallImpl: PresentationCall {

    private var sessionStateDisposable: Disposable?

    private let statePromise = ValuePromise<PresentationCallState>(.waiting, ignoreRepeated: true)
    private let statePromise = ValuePromise<PresentationCallState>(PresentationCallState(state: .waiting, videoState: .notAvailable), ignoreRepeated: true)
    public var state: Signal<PresentationCallState, NoError> {
        return self.statePromise.get()
    }

@@ -402,7 +402,7 @@ public final class PresentationCallImpl: PresentationCall {

        switch sessionState.state {
        case .ringing:
            presentationState = .ringing
            presentationState = PresentationCallState(state: .ringing, videoState: .notAvailable)
            if previous == nil || previousControl == nil {
                if !self.reportedIncomingCall {
                    self.reportedIncomingCall = true

@@ -429,19 +429,28 @@ public final class PresentationCallImpl: PresentationCall {
            }
        case .accepting:
            self.callWasActive = true
            presentationState = .connecting(nil)
            presentationState = PresentationCallState(state: .connecting(nil), videoState: .notAvailable)
        case .dropping:
            presentationState = .terminating
            presentationState = PresentationCallState(state: .terminating, videoState: .notAvailable)
        case let .terminated(id, reason, options):
            presentationState = .terminated(id, reason, self.callWasActive && (options.contains(.reportRating) || self.shouldPresentCallRating))
            presentationState = PresentationCallState(state: .terminated(id, reason, self.callWasActive && (options.contains(.reportRating) || self.shouldPresentCallRating)), videoState: .notAvailable)
        case let .requesting(ringing):
            presentationState = .requesting(ringing)
            presentationState = PresentationCallState(state: .requesting(ringing), videoState: .notAvailable)
        case let .active(_, _, keyVisualHash, _, _, _, _):
            self.callWasActive = true
            if let callContextState = callContextState {
                switch callContextState {
                let mappedVideoState: PresentationCallState.VideoState
                switch callContextState.videoState {
                case .notAvailable:
                    mappedVideoState = .notAvailable
                case let .available(enabled):
                    mappedVideoState = .available(enabled)
                case .active:
                    mappedVideoState = .active
                }
                switch callContextState.state {
                case .initializing:
                    presentationState = .connecting(keyVisualHash)
                    presentationState = PresentationCallState(state: .connecting(keyVisualHash), videoState: mappedVideoState)
                case .failed:
                    presentationState = nil
                    self.callSessionManager.drop(internalId: self.internalId, reason: .disconnect, debugLog: .single(nil))

@@ -453,7 +462,7 @@ public final class PresentationCallImpl: PresentationCall {
                        timestamp = CFAbsoluteTimeGetCurrent()
                        self.activeTimestamp = timestamp
                    }
                    presentationState = .active(timestamp, reception, keyVisualHash)
                    presentationState = PresentationCallState(state: .active(timestamp, reception, keyVisualHash), videoState: mappedVideoState)
                case .reconnecting:
                    let timestamp: Double
                    if let activeTimestamp = self.activeTimestamp {

@@ -462,10 +471,10 @@ public final class PresentationCallImpl: PresentationCall {
                        timestamp = CFAbsoluteTimeGetCurrent()
                        self.activeTimestamp = timestamp
                    }
                    presentationState = .reconnecting(timestamp, reception, keyVisualHash)
                    presentationState = PresentationCallState(state: .reconnecting(timestamp, reception, keyVisualHash), videoState: mappedVideoState)
                }
            } else {
                presentationState = .connecting(keyVisualHash)
                presentationState = PresentationCallState(state: .connecting(keyVisualHash), videoState: .notAvailable)
            }
        }

@@ -555,12 +564,12 @@ public final class PresentationCallImpl: PresentationCall {

    private func updateTone(_ state: PresentationCallState, callContextState: OngoingCallContextState?, previous: CallSession?) {
        var tone: PresentationCallTone?
        if let callContextState = callContextState, case .reconnecting = callContextState {
        if let callContextState = callContextState, case .reconnecting = callContextState.state {
            tone = .connecting
        } else if let previous = previous {
            switch previous.state {
            case .accepting, .active, .dropping, .requesting:
                switch state {
                switch state.state {
                case .connecting:
                    if case .requesting = previous.state {
                        tone = .ringing

@@ -652,6 +661,14 @@ public final class PresentationCallImpl: PresentationCall {
        self.ongoingContext?.setIsMuted(self.isMutedValue)
    }

    public func setEnableVideo(_ value: Bool) {
        self.ongoingContext?.setEnableVideo(value)
    }

    public func switchVideoCamera() {
        self.ongoingContext?.switchVideoCamera()
    }

    public func setCurrentAudioOutput(_ output: AudioSessionOutput) {
        guard self.currentAudioOutputValue != output else {
            return
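
Note: since PresentationCallState stays Equatable and the promise keeps ignoreRepeated: true, wrapping the old enum in a struct does not add spurious emissions: a value equal to the current one is swallowed. A sketch of that behaviour (assuming SwiftSignalKit's ValuePromise semantics):

    let statePromise = ValuePromise<PresentationCallState>(
        PresentationCallState(state: .waiting, videoState: .notAvailable),
        ignoreRepeated: true)

    // Equal value: no emission to subscribers.
    statePromise.set(PresentationCallState(state: .waiting, videoState: .notAvailable))
    // A different state (or a changed videoState alone) does emit.
    statePromise.set(PresentationCallState(state: .requesting(false), videoState: .notAvailable))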
@@ -597,7 +597,7 @@ public final class SharedAccountContextImpl: SharedAccountContext {
            if let strongSelf = self {
                let resolvedText: CallStatusText
                if let state = state {
                    switch state {
                    switch state.state {
                    case .connecting, .requesting, .terminating, .ringing, .waiting:
                        resolvedText = .inProgress(nil)
                    case .terminated:
@@ -496,7 +496,7 @@ public final class SharedNotificationManager {
        if isIntegratedWithCallKit {
            return nil
        }
        if case .ringing = state {
        if case .ringing = state.state {
            return (peer, internalId)
        } else {
            return nil
@@ -93,11 +93,20 @@ private let setupLogs: Bool = {
    return true
}()

public enum OngoingCallContextState {
    case initializing
    case connected
    case reconnecting
    case failed
public struct OngoingCallContextState: Equatable {
    public enum State {
        case initializing
        case connected
        case reconnecting
        case failed
    }
    public enum VideoState: Equatable {
        case notAvailable
        case available(Bool)
        case active
    }
    public let state: State
    public let videoState: VideoState
}

private final class OngoingCallThreadLocalContextQueueImpl: NSObject, OngoingCallThreadLocalContextQueue, OngoingCallThreadLocalContextQueueWebrtc /*, OngoingCallThreadLocalContextQueueWebrtcCustom*/ {

@@ -226,6 +235,8 @@ private func ongoingDataSavingForTypeWebrtc(_ type: VoiceCallDataSaving) -> Ongo
private protocol OngoingCallThreadLocalContextProtocol: class {
    func nativeSetNetworkType(_ type: NetworkType)
    func nativeSetIsMuted(_ value: Bool)
    func nativeSetVideoEnabled(_ value: Bool)
    func nativeSwitchVideoCamera()
    func nativeStop(_ completion: @escaping (String?, Int64, Int64, Int64, Int64) -> Void)
    func nativeDebugInfo() -> String
    func nativeVersion() -> String

@@ -253,6 +264,12 @@ extension OngoingCallThreadLocalContext: OngoingCallThreadLocalContextProtocol {
        self.setIsMuted(value)
    }

    func nativeSetVideoEnabled(_ value: Bool) {
    }

    func nativeSwitchVideoCamera() {
    }

    func nativeDebugInfo() -> String {
        return self.debugInfo() ?? ""
    }

@@ -279,6 +296,14 @@ extension OngoingCallThreadLocalContextWebrtc: OngoingCallThreadLocalContextProt
        self.setIsMuted(value)
    }

    func nativeSetVideoEnabled(_ value: Bool) {
        self.setVideoEnabled(value)
    }

    func nativeSwitchVideoCamera() {
        self.switchVideoCamera()
    }

    func nativeDebugInfo() -> String {
        return self.debugInfo() ?? ""
    }

@@ -318,7 +343,7 @@ extension OngoingCallThreadLocalContextWebrtc: OngoingCallThreadLocalContextProt
    }
}*/

private extension OngoingCallContextState {
private extension OngoingCallContextState.State {
    init(_ state: OngoingCallState) {
        switch state {
        case .initializing:

@@ -335,7 +360,7 @@ private extension OngoingCallContextState {
    }
}

private extension OngoingCallContextState {
private extension OngoingCallContextState.State {
    init(_ state: OngoingCallStateWebrtc) {
        switch state {
        case .initializing:

@@ -471,8 +496,25 @@ public final class OngoingCallContext {
            })

            strongSelf.contextRef = Unmanaged.passRetained(OngoingCallThreadLocalContextHolder(context))
            context.stateChanged = { state in
                self?.contextState.set(.single(OngoingCallContextState(state)))
            context.stateChanged = { state, videoState in
                queue.async {
                    guard let strongSelf = self else {
                        return
                    }
                    let mappedState = OngoingCallContextState.State(state)
                    let mappedVideoState: OngoingCallContextState.VideoState
                    switch videoState {
                    case .inactive:
                        mappedVideoState = .available(true)
                    case .active:
                        mappedVideoState = .active
                    case .invited, .requesting:
                        mappedVideoState = .available(false)
                    @unknown default:
                        mappedVideoState = .available(false)
                    }
                    strongSelf.contextState.set(.single(OngoingCallContextState(state: mappedState, videoState: mappedVideoState)))
                }
            }
            context.signalBarsChanged = { signalBars in
                self?.receptionPromise.set(.single(signalBars))

@@ -498,7 +540,7 @@ public final class OngoingCallContext {

            strongSelf.contextRef = Unmanaged.passRetained(OngoingCallThreadLocalContextHolder(context))
            context.stateChanged = { state in
                self?.contextState.set(.single(OngoingCallContextState(state)))
                self?.contextState.set(.single(OngoingCallContextState(state: OngoingCallContextState.State(state), videoState: .notAvailable)))
            }
            context.signalBarsChanged = { signalBars in
                self?.receptionPromise.set(.single(signalBars))

@@ -588,6 +630,18 @@ public final class OngoingCallContext {
        }
    }

    public func setEnableVideo(_ value: Bool) {
        self.withContext { context in
            context.nativeSetVideoEnabled(value)
        }
    }

    public func switchVideoCamera() {
        self.withContext { context in
            context.nativeSwitchVideoCamera()
        }
    }

    public func debugInfo() -> Signal<(String, String), NoError> {
        let poll = Signal<(String, String), NoError> { subscriber in
            self.withContext { context in
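
Note: the WebRTC context callback now delivers a (state, videoState) pair, and the handler hops onto the context queue before publishing, keeping contextState updates serialized. The video-state mapping, restated as a standalone function for clarity (a sketch; the Swift-visible name of the ObjC video-state enum is assumed here):

    func mapVideoState(_ videoState: OngoingCallVideoStateWebrtc) -> OngoingCallContextState.VideoState {
        switch videoState {
        case .inactive:
            return .available(true)    // camera off, but the user may switch it on
        case .active:
            return .active
        case .invited, .requesting:
            return .available(false)   // handshake pending, toggle disabled
        @unknown default:
            return .available(false)
        }
    }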
@@ -29,6 +29,7 @@ objc_library(
        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/video_codec",
        "-Ithird-party/webrtc/webrtc-ios/src/third_party/libyuv/include",
        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/api/video_codec",
        "-DWEBRTC_IOS",
        "-DWEBRTC_MAC",
        "-DWEBRTC_POSIX",
@@ -18,8 +18,9 @@ public:
void configurePlatformAudio();
std::unique_ptr<webrtc::VideoEncoderFactory> makeVideoEncoderFactory();
std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory();
bool supportsH265Encoding();
rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread);
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source);
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera);

#ifdef TGVOIP_NAMESPACE
}
@@ -30,6 +30,8 @@

#import "VideoCameraCapturer.h"

#import <AVFoundation/AVFoundation.h>

@interface VideoCapturerInterfaceImplReference : NSObject {
    VideoCameraCapturer *_videoCapturer;
}

@@ -38,7 +40,7 @@

@implementation VideoCapturerInterfaceImplReference

- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source {
- (instancetype)initWithSource:(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface>)source useFrontCamera:(bool)useFrontCamera {
    self = [super init];
    if (self != nil) {
        assert([NSThread isMainThread]);

@@ -46,18 +48,27 @@
        _videoCapturer = [[VideoCameraCapturer alloc] initWithSource:source];

        AVCaptureDevice *frontCamera = nil;
        AVCaptureDevice *backCamera = nil;
        for (AVCaptureDevice *device in [VideoCameraCapturer captureDevices]) {
            if (device.position == AVCaptureDevicePositionFront) {
                frontCamera = device;
                break;
            } else if (device.position == AVCaptureDevicePositionBack) {
                backCamera = device;
            }
        }

        if (frontCamera == nil) {
        AVCaptureDevice *selectedCamera = nil;
        if (useFrontCamera && frontCamera != nil) {
            selectedCamera = frontCamera;
        } else {
            selectedCamera = backCamera;
        }

        if (selectedCamera == nil) {
            return nil;
        }

        NSArray<AVCaptureDeviceFormat *> *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:frontCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) {
        NSArray<AVCaptureDeviceFormat *> *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:selectedCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) {
            int32_t width1 = CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width;
            int32_t width2 = CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width;
            return width1 < width2 ? NSOrderedAscending : NSOrderedDescending;

@@ -90,7 +101,7 @@
            return nil;
        }

        [_videoCapturer startCaptureWithDevice:frontCamera format:bestFormat fps:30];
        [_videoCapturer startCaptureWithDevice:selectedCamera format:bestFormat fps:30];
    }
    return self;
}

@@ -119,12 +130,12 @@ namespace TGVOIP_NAMESPACE {

class VideoCapturerInterfaceImpl: public VideoCapturerInterface {
public:
    VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source) :
    VideoCapturerInterfaceImpl(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera) :
    _source(source) {
        _implReference = [[VideoCapturerInterfaceImplHolder alloc] init];
        VideoCapturerInterfaceImplHolder *implReference = _implReference;
        dispatch_async(dispatch_get_main_queue(), ^{
            VideoCapturerInterfaceImplReference *value = [[VideoCapturerInterfaceImplReference alloc] initWithSource:source];
            VideoCapturerInterfaceImplReference *value = [[VideoCapturerInterfaceImplReference alloc] initWithSource:source useFrontCamera:useFrontCamera];
            if (value != nil) {
                implReference.reference = (void *)CFBridgingRetain(value);
            }

@@ -161,13 +172,21 @@ std::unique_ptr<webrtc::VideoDecoderFactory> makeVideoDecoderFactory() {
    return webrtc::ObjCToNativeVideoDecoderFactory([[TGRTCDefaultVideoDecoderFactory alloc] init]);
}

bool supportsH265Encoding() {
    if (@available(iOS 11.0, *)) {
        return [[AVAssetExportSession allExportPresets] containsObject:AVAssetExportPresetHEVCHighestQuality];
    } else {
        return false;
    }
}

rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) {
    rtc::scoped_refptr<webrtc::ObjCVideoTrackSource> objCVideoTrackSource(new rtc::RefCountedObject<webrtc::ObjCVideoTrackSource>());
    return webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, objCVideoTrackSource);
}

std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source) {
    return std::make_unique<VideoCapturerInterfaceImpl>(source);
std::unique_ptr<VideoCapturerInterface> makeVideoCapturer(rtc::scoped_refptr<webrtc::VideoTrackSourceInterface> source, bool useFrontCamera) {
    return std::make_unique<VideoCapturerInterfaceImpl>(source, useFrontCamera);
}

#ifdef TGVOIP_NAMESPACE
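
Note: the capturer now takes a useFrontCamera flag, falls back to the back camera, and keeps ranking the device's formats by frame width. The same selection logic in Swift, as a sketch against plain AVFoundation (not code from this commit):

    import AVFoundation

    func selectCamera(useFrontCamera: Bool) -> AVCaptureDevice? {
        let devices = AVCaptureDevice.DiscoverySession(
            deviceTypes: [.builtInWideAngleCamera],
            mediaType: .video,
            position: .unspecified).devices
        let front = devices.first(where: { $0.position == .front })
        let back = devices.first(where: { $0.position == .back })
        // Prefer the requested position; fall back to the back camera, as the diff does.
        return (useFrontCamera ? front : nil) ?? back
    }

    func widestFormat(for device: AVCaptureDevice) -> AVCaptureDevice.Format? {
        // Mirrors the comparator above: rank formats by width.
        return device.formats.max(by: { lhs, rhs in
            CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width <
                CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width
        })
    }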
@@ -1,5 +1,7 @@
#include "Manager.h"

#include "rtc_base/byte_buffer.h"

#ifdef TGVOIP_NAMESPACE
namespace TGVOIP_NAMESPACE {
#endif

@@ -35,13 +37,16 @@ Manager::Manager(
    TgVoipEncryptionKey encryptionKey,
    bool enableP2P,
    std::function<void (const TgVoipState &)> stateUpdated,
    std::function<void (bool)> videoStateUpdated,
    std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
) :
_thread(thread),
_encryptionKey(encryptionKey),
_enableP2P(enableP2P),
_stateUpdated(stateUpdated),
_signalingDataEmitted(signalingDataEmitted) {
_videoStateUpdated(videoStateUpdated),
_signalingDataEmitted(signalingDataEmitted),
_isVideoRequested(false) {
    assert(_thread->IsCurrent());
}

@@ -87,7 +92,14 @@ void Manager::start() {
        });
    },
    [signalingDataEmitted](const std::vector<uint8_t> &data) {
        signalingDataEmitted(data);
        rtc::CopyOnWriteBuffer buffer;
        uint8_t mode = 3;
        buffer.AppendData(&mode, 1);
        buffer.AppendData(data.data(), data.size());
        std::vector<uint8_t> augmentedData;
        augmentedData.resize(buffer.size());
        memcpy(augmentedData.data(), buffer.data(), buffer.size());
        signalingDataEmitted(augmentedData);
    }
    );
}));

@@ -112,8 +124,59 @@ void Manager::start() {
}

void Manager::receiveSignalingData(const std::vector<uint8_t> &data) {
    _networkManager->perform([data](NetworkManager *networkManager) {
        networkManager->receiveSignalingData(data);
    rtc::CopyOnWriteBuffer buffer;
    buffer.AppendData(data.data(), data.size());

    if (buffer.size() < 1) {
        return;
    }

    rtc::ByteBufferReader reader((const char *)buffer.data(), buffer.size());
    uint8_t mode = 0;
    if (!reader.ReadUInt8(&mode)) {
        return;
    }

    if (mode == 1) {
        _mediaManager->perform([](MediaManager *mediaManager) {
            mediaManager->setSendVideo(true);
        });
        _videoStateUpdated(true);
    } else if (mode == 2) {
    } else if (mode == 3) {
        auto candidatesData = buffer.Slice(1, buffer.size() - 1);
        _networkManager->perform([candidatesData](NetworkManager *networkManager) {
            networkManager->receiveSignalingData(candidatesData);
        });
    }
}

void Manager::setSendVideo(bool sendVideo) {
    if (sendVideo) {
        if (!_isVideoRequested) {
            _isVideoRequested = true;

            rtc::CopyOnWriteBuffer buffer;
            uint8_t mode = 1;
            buffer.AppendData(&mode, 1);
            std::vector<uint8_t> data;
            data.resize(buffer.size());
            memcpy(data.data(), buffer.data(), buffer.size());

            _signalingDataEmitted(data);

            _mediaManager->perform([](MediaManager *mediaManager) {
                mediaManager->setSendVideo(true);
            });

            _videoStateUpdated(true);
        }
    }
}

void Manager::switchVideoCamera() {
    _mediaManager->perform([](MediaManager *mediaManager) {
        mediaManager->switchVideoCamera();
    });
}
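
Note: signaling payloads are now wrapped in a one-byte envelope: mode 1 asks the peer to start sending video, mode 3 carries the transport's candidate data, and mode 2 is received but currently ignored. The framing, restated in Swift for clarity (illustrative only; the real implementation is the C++ Manager above):

    enum SignalingMessage {
        case requestVideo          // mode 1
        case transportData(Data)   // mode 3: wrapped candidates payload

        func encoded() -> Data {
            switch self {
            case .requestVideo:
                return Data([1])
            case let .transportData(payload):
                var data = Data([3])
                data.append(payload)
                return data
            }
        }

        static func decode(_ data: Data) -> SignalingMessage? {
            guard let mode = data.first else {
                return nil
            }
            switch mode {
            case 1:
                return .requestVideo
            case 3:
                return .transportData(data.dropFirst())
            default:
                return nil // mode 2 reserved; unknown modes dropped
            }
        }
    }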
@@ -17,12 +17,15 @@ public:
        TgVoipEncryptionKey encryptionKey,
        bool enableP2P,
        std::function<void (const TgVoipState &)> stateUpdated,
        std::function<void (bool)> videoStateUpdated,
        std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
    );
    ~Manager();

    void start();
    void receiveSignalingData(const std::vector<uint8_t> &data);
    void setSendVideo(bool sendVideo);
    void switchVideoCamera();
    void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setOutgoingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

@@ -31,9 +34,11 @@ private:
    TgVoipEncryptionKey _encryptionKey;
    bool _enableP2P;
    std::function<void (const TgVoipState &)> _stateUpdated;
    std::function<void (bool)> _videoStateUpdated;
    std::function<void (const std::vector<uint8_t> &)> _signalingDataEmitted;
    std::unique_ptr<ThreadLocalObject<NetworkManager>> _networkManager;
    std::unique_ptr<ThreadLocalObject<MediaManager>> _mediaManager;
    bool _isVideoRequested;

private:
};
@@ -17,6 +17,8 @@
#include "api/video/video_bitrate_allocation.h"
#include "call/call.h"

#include "api/video_codecs/builtin_video_encoder_factory.h"

#if TARGET_OS_IPHONE

#include "CodecsApple.h"

@@ -108,27 +110,50 @@ static std::vector<cricket::VideoCodec> AssignPayloadTypesAndDefaultCodecs(std::
    return output_codecs;
}

static int sendCodecPriority(const cricket::VideoCodec &codec) {
    int priotity = 0;
    if (codec.name == cricket::kAv1CodecName) {
        return priotity;
    }
    priotity++;
    if (codec.name == cricket::kH265CodecName) {
        if (supportsH265Encoding()) {
            return priotity;
        }
    }
    priotity++;
    if (codec.name == cricket::kH264CodecName) {
        return priotity;
    }
    priotity++;
    if (codec.name == cricket::kVp9CodecName) {
        return priotity;
    }
    priotity++;
    if (codec.name == cricket::kVp8CodecName) {
        return priotity;
    }
    priotity++;
    return -1;
}

static absl::optional<cricket::VideoCodec> selectVideoCodec(std::vector<cricket::VideoCodec> &codecs) {
    bool useVP9 = false;
    bool useH265 = true;

    std::vector<cricket::VideoCodec> sortedCodecs;
    for (auto &codec : codecs) {
        if (useVP9) {
            if (codec.name == cricket::kVp9CodecName) {
                return absl::optional<cricket::VideoCodec>(codec);
            }
        } else if (useH265) {
            if (codec.name == cricket::kH265CodecName) {
                return absl::optional<cricket::VideoCodec>(codec);
            }
        } else {
            if (codec.name == cricket::kH264CodecName) {
                return absl::optional<cricket::VideoCodec>(codec);
            }
        if (sendCodecPriority(codec) != -1) {
            sortedCodecs.push_back(codec);
        }
    }

    return absl::optional<cricket::VideoCodec>();
    std::sort(sortedCodecs.begin(), sortedCodecs.end(), [](const cricket::VideoCodec &lhs, const cricket::VideoCodec &rhs) {
        return sendCodecPriority(lhs) < sendCodecPriority(rhs);
    });

    if (sortedCodecs.size() != 0) {
        return sortedCodecs[0];
    } else {
        return absl::nullopt;
    }
}
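
Note: codec choice moves from hard-coded branches to a ranking: AV1 first, then H.265 (only when supportsH265Encoding() says the device can encode it), then H.264, VP9, VP8; unranked codecs are dropped and the best-ranked survivor wins. The same ranking in Swift (a sketch; codec names are plain strings here):

    func sendCodecPriority(_ name: String, supportsH265: Bool) -> Int? {
        // Lower value = higher priority, mirroring the C++ above.
        switch name {
        case "AV1": return 0
        case "H265" where supportsH265: return 1
        case "H264": return 2
        case "VP9": return 3
        case "VP8": return 4
        default: return nil
        }
    }

    func selectVideoCodec(_ codecs: [String], supportsH265: Bool) -> String? {
        return codecs
            .compactMap { name in sendCodecPriority(name, supportsH265: supportsH265).map { (name, $0) } }
            .min(by: { $0.1 < $1.1 })?.0
    }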
|
||||
|
||||
static rtc::Thread *makeWorkerThread() {
|
||||
@ -162,6 +187,14 @@ _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) {
|
||||
_ssrcVideo.fecIncoming = isOutgoing ? ssrcVideoFecIncoming : ssrcVideoFecOutgoing;
|
||||
_ssrcVideo.fecOutgoing = (!isOutgoing) ? ssrcVideoFecIncoming : ssrcVideoFecOutgoing;
|
||||
|
||||
_isConnected = false;
|
||||
|
||||
auto videoEncoderFactory = makeVideoEncoderFactory();
|
||||
_videoCodecs = AssignPayloadTypesAndDefaultCodecs(videoEncoderFactory->GetSupportedFormats());
|
||||
|
||||
_isSendingVideo = false;
|
||||
_useFrontCamera = true;
|
||||
|
||||
_audioNetworkInterface = std::unique_ptr<MediaManager::NetworkInterfaceImpl>(new MediaManager::NetworkInterfaceImpl(this, false));
|
||||
_videoNetworkInterface = std::unique_ptr<MediaManager::NetworkInterfaceImpl>(new MediaManager::NetworkInterfaceImpl(this, true));
|
||||
|
||||
@ -182,9 +215,6 @@ _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) {
|
||||
mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory<webrtc::AudioEncoderOpus>();
|
||||
mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory<webrtc::AudioDecoderOpus>();
|
||||
|
||||
auto videoEncoderFactory = makeVideoEncoderFactory();
|
||||
std::vector<cricket::VideoCodec> videoCodecs = AssignPayloadTypesAndDefaultCodecs(videoEncoderFactory->GetSupportedFormats());
|
||||
|
||||
mediaDeps.video_encoder_factory = makeVideoEncoderFactory();
|
||||
mediaDeps.video_decoder_factory = makeVideoDecoderFactory();
|
||||
|
||||
@ -211,7 +241,6 @@ _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) {
|
||||
const uint8_t opusMaxBitrateKbps = 32;
|
||||
const uint8_t opusStartBitrateKbps = 6;
|
||||
const uint8_t opusPTimeMs = 120;
|
||||
const int extensionSequenceOne = 1;
|
||||
|
||||
cricket::AudioCodec opusCodec(opusSdpPayload, opusSdpName, opusClockrate, opusSdpBitrate, opusSdpChannels);
|
||||
opusCodec.AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc));
|
||||
@ -223,7 +252,7 @@ _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) {
|
||||
|
||||
cricket::AudioSendParameters audioSendPrameters;
|
||||
audioSendPrameters.codecs.push_back(opusCodec);
|
||||
audioSendPrameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne);
|
||||
audioSendPrameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
|
||||
audioSendPrameters.options.echo_cancellation = false;
|
||||
//audioSendPrameters.options.experimental_ns = false;
|
||||
audioSendPrameters.options.noise_suppression = false;
|
||||
@ -238,7 +267,7 @@ _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) {
|
||||
|
||||
cricket::AudioRecvParameters audioRecvParameters;
|
||||
audioRecvParameters.codecs.emplace_back(opusSdpPayload, opusSdpName, opusClockrate, opusSdpBitrate, opusSdpChannels);
|
||||
audioRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne);
|
||||
audioRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
|
||||
audioRecvParameters.rtcp.reduced_size = true;
|
||||
audioRecvParameters.rtcp.remote_estimate = true;
|
||||
|
||||
@ -246,75 +275,9 @@ _taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) {
|
||||
_audioChannel->AddRecvStream(cricket::StreamParams::CreateLegacy(_ssrcAudio.incoming));
|
||||
_audioChannel->SetPlayout(true);
|
||||
|
||||
cricket::StreamParams videoSendStreamParams;
|
||||
cricket::SsrcGroup videoSendSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.outgoing, _ssrcVideo.fecOutgoing});
|
||||
videoSendStreamParams.ssrcs = {_ssrcVideo.outgoing};
|
||||
videoSendStreamParams.ssrc_groups.push_back(videoSendSsrcGroup);
|
||||
videoSendStreamParams.cname = "cname";
|
||||
_videoChannel->AddSendStream(videoSendStreamParams);
|
||||
_videoChannel->SetInterface(_videoNetworkInterface.get(), webrtc::MediaTransportConfig());
|
||||
|
||||
auto videoCodec = selectVideoCodec(videoCodecs);
|
||||
if (videoCodec.has_value()) {
|
||||
_nativeVideoSource = makeVideoSource(_thread, getWorkerThread());
|
||||
|
||||
auto codec = videoCodec.value();
|
||||
|
||||
codec.SetParam(cricket::kCodecParamMinBitrate, 64);
|
||||
codec.SetParam(cricket::kCodecParamStartBitrate, 512);
|
||||
codec.SetParam(cricket::kCodecParamMaxBitrate, 2500);
|
||||
|
||||
_videoCapturer = makeVideoCapturer(_nativeVideoSource);
|
||||
|
||||
cricket::VideoSendParameters videoSendParameters;
|
||||
videoSendParameters.codecs.push_back(codec);
|
||||
|
||||
for (auto &c : videoCodecs) {
|
||||
if (c.name == cricket::kFlexfecCodecName) {
|
||||
videoSendParameters.codecs.push_back(c);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
videoSendParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne);
|
||||
//send_parameters.max_bandwidth_bps = 800000;
|
||||
//send_parameters.rtcp.reduced_size = true;
|
||||
//videoSendParameters.rtcp.remote_estimate = true;
|
||||
_videoChannel->SetSendParameters(videoSendParameters);
|
||||
|
||||
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, _nativeVideoSource.get());
|
||||
_videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);
|
||||
|
||||
_videoChannel->SetInterface(_videoNetworkInterface.get(), webrtc::MediaTransportConfig());
|
||||
|
||||
cricket::VideoRecvParameters videoRecvParameters;
|
||||
videoRecvParameters.codecs.emplace_back(codec);
|
||||
|
||||
for (auto &c : videoCodecs) {
|
||||
if (c.name == cricket::kFlexfecCodecName) {
|
||||
videoRecvParameters.codecs.push_back(c);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
videoRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne);
|
||||
//recv_parameters.rtcp.reduced_size = true;
|
||||
videoRecvParameters.rtcp.remote_estimate = true;
|
||||
|
||||
cricket::StreamParams videoRecvStreamParams;
|
||||
cricket::SsrcGroup videoRecvSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.incoming, _ssrcVideo.fecIncoming});
|
||||
videoRecvStreamParams.ssrcs = {_ssrcVideo.incoming};
|
||||
videoRecvStreamParams.ssrc_groups.push_back(videoRecvSsrcGroup);
|
||||
videoRecvStreamParams.cname = "cname";
|
||||
|
||||
_videoChannel->AddRecvStream(videoRecvStreamParams);
|
||||
_videoChannel->SetRecvParameters(videoRecvParameters);
|
||||
|
||||
/*webrtc::FlexfecReceiveStream::Config config(_videoNetworkInterface.get());
|
||||
config.payload_type = 118;
|
||||
config.protected_media_ssrcs = {1324234};
|
||||
webrtc::FlexfecReceiveStream* stream;
|
||||
std::list<webrtc::FlexfecReceiveStream *> streams;*/
|
||||
}
|
||||
_nativeVideoSource = makeVideoSource(_thread, getWorkerThread());
|
||||
}
|
||||
|
||||
MediaManager::~MediaManager() {
|
||||
@ -334,18 +297,16 @@ MediaManager::~MediaManager() {
|
||||
|
||||
_audioChannel->SetInterface(nullptr, webrtc::MediaTransportConfig());
|
||||
|
||||
_videoChannel->RemoveRecvStream(_ssrcVideo.incoming);
|
||||
_videoChannel->RemoveRecvStream(_ssrcVideo.fecIncoming);
|
||||
_videoChannel->RemoveSendStream(_ssrcVideo.outgoing);
|
||||
_videoChannel->RemoveSendStream(_ssrcVideo.fecOutgoing);
|
||||
|
||||
_videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, nullptr);
|
||||
_videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);
|
||||
_videoChannel->SetInterface(nullptr, webrtc::MediaTransportConfig());
|
||||
setSendVideo(false);
|
||||
}
|
||||
|
||||
void MediaManager::setIsConnected(bool isConnected) {
|
||||
if (isConnected) {
|
||||
if (_isConnected == isConnected) {
|
||||
return;
|
||||
}
|
||||
_isConnected = isConnected;
|
||||
|
||||
if (_isConnected) {
|
||||
_call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkUp);
|
||||
_call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkUp);
|
||||
} else {
|
||||
@ -353,13 +314,13 @@ void MediaManager::setIsConnected(bool isConnected) {
|
||||
_call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkDown);
|
||||
}
|
||||
if (_audioChannel) {
|
||||
_audioChannel->OnReadyToSend(isConnected);
|
||||
_audioChannel->SetSend(isConnected);
|
||||
_audioChannel->SetAudioSend(_ssrcAudio.outgoing, isConnected, nullptr, &_audioSource);
|
||||
_audioChannel->OnReadyToSend(_isConnected);
|
||||
_audioChannel->SetSend(_isConnected);
|
||||
_audioChannel->SetAudioSend(_ssrcAudio.outgoing, _isConnected, nullptr, &_audioSource);
|
||||
}
|
||||
if (_videoChannel) {
|
||||
_videoChannel->OnReadyToSend(isConnected);
|
||||
_videoChannel->SetSend(isConnected);
|
||||
if (_isSendingVideo && _videoChannel) {
|
||||
_videoChannel->OnReadyToSend(_isConnected);
|
||||
_videoChannel->SetSend(_isConnected);
|
||||
}
|
||||
}
|
||||
|
@ -386,14 +347,113 @@ void MediaManager::notifyPacketSent(const rtc::SentPacket &sentPacket) {
    _call->OnSentPacket(sentPacket);
}

void MediaManager::setSendVideo(bool sendVideo) {
    if (_isSendingVideo == sendVideo) {
        return;
    }
    _isSendingVideo = sendVideo;

    if (_isSendingVideo) {
        auto videoCodec = selectVideoCodec(_videoCodecs);
        if (videoCodec.has_value()) {
            auto codec = videoCodec.value();

            codec.SetParam(cricket::kCodecParamMinBitrate, 64);
            codec.SetParam(cricket::kCodecParamStartBitrate, 512);
            codec.SetParam(cricket::kCodecParamMaxBitrate, 2500);

            _videoCapturer = makeVideoCapturer(_nativeVideoSource, _useFrontCamera);

            cricket::VideoSendParameters videoSendParameters;
            videoSendParameters.codecs.push_back(codec);

            for (auto &c : _videoCodecs) {
                if (c.name == cricket::kFlexfecCodecName) {
                    videoSendParameters.codecs.push_back(c);
                    break;
                }
            }

            videoSendParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
            //send_parameters.max_bandwidth_bps = 800000;
            //send_parameters.rtcp.reduced_size = true;
            //videoSendParameters.rtcp.remote_estimate = true;
            _videoChannel->SetSendParameters(videoSendParameters);

            cricket::StreamParams videoSendStreamParams;
            cricket::SsrcGroup videoSendSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.outgoing, _ssrcVideo.fecOutgoing});
            videoSendStreamParams.ssrcs = {_ssrcVideo.outgoing};
            videoSendStreamParams.ssrc_groups.push_back(videoSendSsrcGroup);
            videoSendStreamParams.cname = "cname";
            _videoChannel->AddSendStream(videoSendStreamParams);

            _videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, _nativeVideoSource.get());
            _videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);

            cricket::VideoRecvParameters videoRecvParameters;

            for (auto &c : _videoCodecs) {
                if (c.name == cricket::kFlexfecCodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kH264CodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kH265CodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kVp8CodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kVp9CodecName) {
                    videoRecvParameters.codecs.push_back(c);
                } else if (c.name == cricket::kAv1CodecName) {
                    videoRecvParameters.codecs.push_back(c);
                }
            }

            videoRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, 1);
            //recv_parameters.rtcp.reduced_size = true;
            videoRecvParameters.rtcp.remote_estimate = true;

            cricket::StreamParams videoRecvStreamParams;
            cricket::SsrcGroup videoRecvSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.incoming, _ssrcVideo.fecIncoming});
            videoRecvStreamParams.ssrcs = {_ssrcVideo.incoming};
            videoRecvStreamParams.ssrc_groups.push_back(videoRecvSsrcGroup);
            videoRecvStreamParams.cname = "cname";

            _videoChannel->AddRecvStream(videoRecvStreamParams);
            _videoChannel->SetRecvParameters(videoRecvParameters);

            if (_isSendingVideo && _videoChannel) {
                _videoChannel->OnReadyToSend(_isConnected);
                _videoChannel->SetSend(_isConnected);
            }
        }
    } else {
        _videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, nullptr);
        _videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr);

        _videoCapturer.reset();

        _videoChannel->RemoveRecvStream(_ssrcVideo.incoming);
        _videoChannel->RemoveRecvStream(_ssrcVideo.fecIncoming);
        _videoChannel->RemoveSendStream(_ssrcVideo.outgoing);
        _videoChannel->RemoveSendStream(_ssrcVideo.fecOutgoing);
    }
}

void MediaManager::switchVideoCamera() {
    if (_isSendingVideo) {
        _useFrontCamera = !_useFrontCamera;
        _videoCapturer = makeVideoCapturer(_nativeVideoSource, _useFrontCamera);
    }
}
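Taken together, setSendVideo and switchVideoCamera form a small state machine: repeated requests for the same state are ignored, disabling tears down exactly what enabling set up, and the camera can only be flipped while video is live. A minimal self-contained sketch of that contract, using toy types rather than the real MediaManager or WebRTC classes:

#include <cassert>
#include <memory>

// Toy stand-in; the real code creates a capturer via makeVideoCapturer.
struct Capturer { bool front; };

struct VideoControlSketch {
    bool isSendingVideo = false;
    bool useFrontCamera = true;
    std::unique_ptr<Capturer> capturer;

    void setSendVideo(bool sendVideo) {
        if (isSendingVideo == sendVideo) {
            return; // idempotent, as in MediaManager::setSendVideo
        }
        isSendingVideo = sendVideo;
        if (isSendingVideo) {
            capturer = std::make_unique<Capturer>(Capturer{useFrontCamera});
        } else {
            capturer.reset(); // mirrors _videoCapturer.reset()
        }
    }

    void switchVideoCamera() {
        if (isSendingVideo) { // flipping is a no-op while video is off
            useFrontCamera = !useFrontCamera;
            capturer = std::make_unique<Capturer>(Capturer{useFrontCamera});
        }
    }
};

int main() {
    VideoControlSketch v;
    v.switchVideoCamera();     // ignored: video not active yet
    v.setSendVideo(true);
    assert(v.capturer && v.capturer->front);
    v.switchVideoCamera();     // recreates the capturer on the other camera
    assert(!v.capturer->front);
    v.setSendVideo(false);
    assert(!v.capturer);
}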

void MediaManager::setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _currentIncomingVideoSink = sink;
    _videoChannel->SetSink(_ssrcVideo.incoming, sink.get());
    _videoChannel->SetSink(_ssrcVideo.incoming, _currentIncomingVideoSink.get());
}

void MediaManager::setOutgoingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) {
    _currentOutgoingVideoSink = sink;
    _nativeVideoSource->AddOrUpdateSink(sink.get(), rtc::VideoSinkWants());
    _nativeVideoSource->AddOrUpdateSink(_currentOutgoingVideoSink.get(), rtc::VideoSinkWants());
}

MediaManager::NetworkInterfaceImpl::NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo) :
@ -64,6 +64,8 @@ public:
    void setIsConnected(bool isConnected);
    void receivePacket(const rtc::CopyOnWriteBuffer &packet);
    void notifyPacketSent(const rtc::SentPacket &sentPacket);
    void setSendVideo(bool sendVideo);
    void switchVideoCamera();
    void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);
    void setOutgoingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink);

@ -78,6 +80,12 @@ private:
    SSRC _ssrcAudio;
    SSRC _ssrcVideo;

    bool _isConnected;

    std::vector<cricket::VideoCodec> _videoCodecs;
    bool _isSendingVideo;
    bool _useFrontCamera;

    std::unique_ptr<cricket::MediaEngineInterface> _mediaEngine;
    std::unique_ptr<webrtc::Call> _call;
    webrtc::FieldTrialBasedConfig _fieldTrials;
@ -175,18 +175,17 @@ _signalingDataEmitted(signalingDataEmitted) {
        flags |= cricket::PORTALLOCATOR_DISABLE_UDP;
        flags |= cricket::PORTALLOCATOR_DISABLE_STUN;
    }
    //flags |= cricket::PORTALLOCATOR_DISABLE_UDP;
    _portAllocator->set_flags(_portAllocator->flags() | flags);
    _portAllocator->Initialize();

    rtc::SocketAddress defaultStunAddress = rtc::SocketAddress("hlgkfjdrtjfykgulhijkljhulyo.uksouth.cloudapp.azure.com", 3478);
    rtc::SocketAddress defaultStunAddress = rtc::SocketAddress("134.122.52.178", 3478);
    cricket::ServerAddresses stunServers;
    stunServers.insert(defaultStunAddress);
    std::vector<cricket::RelayServerConfig> turnServers;
    turnServers.push_back(cricket::RelayServerConfig(
        rtc::SocketAddress("hlgkfjdrtjfykgulhijkljhulyo.uksouth.cloudapp.azure.com", 3478),
        "user",
        "root",
        rtc::SocketAddress("134.122.52.178", 3478),
        "openrelay",
        "openrelay",
        cricket::PROTO_UDP
    ));
    _portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE);
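The STUN/TURN endpoint and credentials are hardcoded here, which fits an experimental branch; a production build would normally receive them from configuration. A sketch of parameterizing the same calls used above (RtcServer is a hypothetical carrier struct, not part of this diff; the cricket/rtc calls are the ones already shown, assuming the same WebRTC headers this file includes):

#include <string>
#include <vector>

// Hypothetical, app-defined server description.
struct RtcServer {
    std::string host;
    int port;
    std::string username;
    std::string password;
    bool isTurn;
};

static void applyServers(cricket::PortAllocator &allocator, const std::vector<RtcServer> &servers) {
    cricket::ServerAddresses stunServers;
    std::vector<cricket::RelayServerConfig> turnServers;
    for (const auto &server : servers) {
        rtc::SocketAddress address(server.host, server.port);
        if (server.isTurn) {
            // Same 4-argument RelayServerConfig constructor as above.
            turnServers.push_back(cricket::RelayServerConfig(address, server.username, server.password, cricket::PROTO_UDP));
        } else {
            stunServers.insert(address);
        }
    }
    allocator.SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE);
}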
@ -233,7 +232,7 @@ NetworkManager::~NetworkManager() {
    _socketFactory.reset();
}

void NetworkManager::receiveSignalingData(const std::vector<uint8_t> &data) {
void NetworkManager::receiveSignalingData(const rtc::CopyOnWriteBuffer &data) {
    rtc::ByteBufferReader reader((const char *)data.data(), data.size());
    uint32_t candidateCount = 0;
    if (!reader.ReadUInt32(&candidateCount)) {
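The reader starts by pulling a 32-bit candidate count, which implies a count-prefixed encoding on the sending side. A self-contained toy of that framing, using a plain byte vector instead of rtc::ByteBufferWriter; the per-candidate payload layout is an assumption, since this hunk only shows the count being read:

#include <cstdint>
#include <string>
#include <vector>

// Big-endian write, matching the network byte order rtc::ByteBuffer uses.
static void writeUInt32(std::vector<uint8_t> &out, uint32_t value) {
    for (int shift = 24; shift >= 0; shift -= 8) {
        out.push_back(static_cast<uint8_t>(value >> shift));
    }
}

// Toy framing: [uint32 count] then, per candidate, [uint32 length][bytes].
std::vector<uint8_t> encodeCandidates(const std::vector<std::string> &candidates) {
    std::vector<uint8_t> out;
    writeUInt32(out, static_cast<uint32_t>(candidates.size()));
    for (const auto &candidate : candidates) {
        writeUInt32(out, static_cast<uint32_t>(candidate.size()));
        out.insert(out.end(), candidate.begin(), candidate.end());
    }
    return out;
}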
@ -47,7 +47,7 @@ public:
    );
    ~NetworkManager();

    void receiveSignalingData(const std::vector<uint8_t> &data);
    void receiveSignalingData(const rtc::CopyOnWriteBuffer &data);
    void sendPacket(const rtc::CopyOnWriteBuffer &packet);

private:
@ -138,6 +138,7 @@ public:
    TgVoipNetworkType initialNetworkType,
    TgVoipEncryptionKey const &encryptionKey,
    std::function<void(TgVoipState)> stateUpdated,
    std::function<void(bool)> videoStateUpdated,
    std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
);

@ -158,6 +159,8 @@ public:
    virtual TgVoipPersistentState getPersistentState() = 0;

    virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
    virtual void setSendVideo(bool sendVideo) = 0;
    virtual void switchVideoCamera() = 0;

    virtual TgVoipFinalState stop() = 0;
};
@ -143,6 +143,7 @@ public:
    TgVoipEncryptionKey const &encryptionKey,
    TgVoipNetworkType initialNetworkType,
    std::function<void(TgVoipState)> stateUpdated,
    std::function<void(bool)> videoStateUpdated,
    std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
) :
    _stateUpdated(stateUpdated),

@ -156,7 +157,7 @@ public:

    bool enableP2P = config.enableP2P;

    _manager.reset(new ThreadLocalObject<Manager>(getManagerThread(), [encryptionKey = encryptionKey, enableP2P = enableP2P, stateUpdated, signalingDataEmitted](){
    _manager.reset(new ThreadLocalObject<Manager>(getManagerThread(), [encryptionKey = encryptionKey, enableP2P = enableP2P, stateUpdated, videoStateUpdated, signalingDataEmitted](){
        return new Manager(
            getManagerThread(),
            encryptionKey,

@ -164,6 +165,9 @@ public:
            [stateUpdated](const TgVoipState &state) {
                stateUpdated(state);
            },
            [videoStateUpdated](bool isActive) {
                videoStateUpdated(isActive);
            },
            [signalingDataEmitted](const std::vector<uint8_t> &data) {
                signalingDataEmitted(data);
            }
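Each layer re-wraps the externally supplied callbacks in fresh lambdas before handing them to the thread-confined Manager; capturing by value gives every layer its own std::function copy, so the callbacks outlive the constructing scope. A minimal sketch of that pattern with a toy inner object, not the real ThreadLocalObject:

#include <functional>
#include <iostream>

// Toy stand-in for the thread-confined Manager.
struct InnerManager {
    std::function<void(bool)> videoStateUpdated;
};

InnerManager makeInner(std::function<void(bool)> videoStateUpdated) {
    // The wrapper lambda owns its own copy of the callback, so it stays
    // valid even after the caller's frame is gone.
    return InnerManager{[videoStateUpdated](bool isActive) {
        videoStateUpdated(isActive);
    }};
}

int main() {
    InnerManager inner = makeInner([](bool isActive) {
        std::cout << "video active: " << isActive << "\n";
    });
    inner.videoStateUpdated(true);
}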
@ -183,6 +187,18 @@ public:
        manager->receiveSignalingData(data);
    });
};

void setSendVideo(bool sendVideo) override {
    _manager->perform([sendVideo](Manager *manager) {
        manager->setSendVideo(sendVideo);
    });
};

void switchVideoCamera() override {
    _manager->perform([](Manager *manager) {
        manager->switchVideoCamera();
    });
}

void setNetworkType(TgVoipNetworkType networkType) override {
    /*message::NetworkType mappedType;
@ -361,6 +377,7 @@ TgVoip *TgVoip::makeInstance(
    TgVoipNetworkType initialNetworkType,
    TgVoipEncryptionKey const &encryptionKey,
    std::function<void(TgVoipState)> stateUpdated,
    std::function<void(bool)> videoStateUpdated,
    std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
) {
    return new TgVoipImpl(

@ -371,6 +388,7 @@ TgVoip *TgVoip::makeInstance(
        encryptionKey,
        initialNetworkType,
        stateUpdated,
        videoStateUpdated,
        signalingDataEmitted
    );
}
|
@ -23,6 +23,13 @@ typedef NS_ENUM(int32_t, OngoingCallStateWebrtc) {
|
||||
OngoingCallStateReconnecting
|
||||
};
|
||||
|
||||
typedef NS_ENUM(int32_t, OngoingCallVideoStateWebrtc) {
|
||||
OngoingCallVideoStateInactive,
|
||||
OngoingCallVideoStateRequesting,
|
||||
OngoingCallVideoStateInvited,
|
||||
OngoingCallVideoStateActive
|
||||
};
|
||||
|
||||
typedef NS_ENUM(int32_t, OngoingCallNetworkTypeWebrtc) {
|
||||
OngoingCallNetworkTypeWifi,
|
||||
OngoingCallNetworkTypeCellularGprs,
|
||||
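Note that the bridge further down only ever produces Inactive or Active, because the underlying videoStateUpdated callback carries a single bool; Requesting and Invited appear reserved for richer signaling later (an inference from this diff, not a documented guarantee). The mapping, reduced to its essence as a sketch:

#include <cstdint>

// Mirrors the NS_ENUM above in plain C++ for illustration.
enum class OngoingCallVideoState : int32_t {
    Inactive,
    Requesting,
    Invited,
    Active,
};

// The bool callback can only reach two of the four states.
OngoingCallVideoState mapVideoState(bool isActive) {
    return isActive ? OngoingCallVideoState::Active
                    : OngoingCallVideoState::Inactive;
}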
@ -62,7 +69,7 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
+ (int32_t)maxLayer;
+ (NSString * _Nonnull)version;

@property (nonatomic, copy) void (^ _Nullable stateChanged)(OngoingCallStateWebrtc);
@property (nonatomic, copy) void (^ _Nullable stateChanged)(OngoingCallStateWebrtc, OngoingCallVideoStateWebrtc);
@property (nonatomic, copy) void (^ _Nullable signalBarsChanged)(int32_t);

- (instancetype _Nonnull)initWithQueue:(id<OngoingCallThreadLocalContextQueueWebrtc> _Nonnull)queue proxy:(VoipProxyServerWebrtc * _Nullable)proxy networkType:(OngoingCallNetworkTypeWebrtc)networkType dataSaving:(OngoingCallDataSavingWebrtc)dataSaving derivedState:(NSData * _Nonnull)derivedState key:(NSData * _Nonnull)key isOutgoing:(bool)isOutgoing primaryConnection:(OngoingCallConnectionDescriptionWebrtc * _Nonnull)primaryConnection alternativeConnections:(NSArray<OngoingCallConnectionDescriptionWebrtc *> * _Nonnull)alternativeConnections maxLayer:(int32_t)maxLayer allowP2P:(BOOL)allowP2P logPath:(NSString * _Nonnull)logPath sendSignalingData:(void (^)(NSData * _Nonnull))sendSignalingData;

@ -75,6 +82,8 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
- (NSData * _Nonnull)getDerivedState;

- (void)setIsMuted:(bool)isMuted;
- (void)setVideoEnabled:(bool)videoEnabled;
- (void)switchVideoCamera;
- (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType;
- (void)makeIncomingVideoView:(void (^_Nonnull)(UIView * _Nullable))completion;
- (void)makeOutgoingVideoView:(void (^_Nonnull)(UIView * _Nullable))completion;
|
@ -34,6 +34,8 @@ using namespace TGVOIP_NAMESPACE;
|
||||
TgVoip *_tgVoip;
|
||||
|
||||
OngoingCallStateWebrtc _state;
|
||||
OngoingCallVideoStateWebrtc _videoState;
|
||||
|
||||
int32_t _signalBars;
|
||||
NSData *_lastDerivedState;
|
||||
|
||||
@ -127,6 +129,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
|
||||
_callPacketTimeout = 10.0;
|
||||
_networkType = networkType;
|
||||
_sendSignalingData = [sendSignalingData copy];
|
||||
_videoState = OngoingCallVideoStateInactive;
|
||||
|
||||
std::vector<uint8_t> derivedStateValue;
|
||||
derivedStateValue.resize(derivedState.length);
|
||||
@ -206,6 +209,25 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
            }
        }];
    },
    [weakSelf, queue](bool isActive) {
        [queue dispatch:^{
            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
            if (strongSelf) {
                OngoingCallVideoStateWebrtc videoState;
                if (isActive) {
                    videoState = OngoingCallVideoStateActive;
                } else {
                    videoState = OngoingCallVideoStateInactive;
                }
                if (strongSelf->_videoState != videoState) {
                    strongSelf->_videoState = videoState;
                    if (strongSelf->_stateChanged) {
                        strongSelf->_stateChanged(strongSelf->_state, strongSelf->_videoState);
                    }
                }
            }
        }];
    },
    [weakSelf, queue](const std::vector<uint8_t> &data) {
        NSData *mappedData = [[NSData alloc] initWithBytes:data.data() length:data.size()];
        [queue dispatch:^{
@ -300,7 +322,7 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
        _state = callState;

        if (_stateChanged) {
            _stateChanged(callState);
            _stateChanged(_state, _videoState);
        }
    }
}
@ -336,6 +358,18 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
    }
}

- (void)setVideoEnabled:(bool)videoEnabled {
    if (_tgVoip) {
        _tgVoip->setSendVideo(videoEnabled);
    }
}

- (void)switchVideoCamera {
    if (_tgVoip) {
        _tgVoip->switchVideoCamera();
    }
}

- (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType {
    if (_networkType != networkType) {
        _networkType = networkType;
third-party/webrtc/BUILD
@ -35,7 +35,7 @@ genrule(
        echo "Unsupported architecture $(TARGET_CPU)"
    fi
    BUILD_DIR="$(RULEDIR)/$$BUILD_ARCH"
    rm -rf "$$BUILD_DIR"
    #rm -rf "$$BUILD_DIR"
    mkdir -p "$$BUILD_DIR"

    SOURCE_PATH="third-party/webrtc/webrtc-ios/src"