diff --git a/buildbox/build-telegram.sh b/buildbox/build-telegram.sh
index 8651822a63..4f82ab498d 100644
--- a/buildbox/build-telegram.sh
+++ b/buildbox/build-telegram.sh
@@ -5,7 +5,7 @@ set -e
 BUILD_TELEGRAM_VERSION="1"
 
 MACOS_VERSION="10.15"
-XCODE_VERSION="11.2"
+XCODE_VERSION="11.5"
 GUEST_SHELL="bash"
 
 VM_BASE_NAME="macos$(echo $MACOS_VERSION | sed -e 's/\./_/g')_Xcode$(echo $XCODE_VERSION | sed -e 's/\./_/g')"
diff --git a/submodules/AccountContext/Sources/PresentationCallManager.swift b/submodules/AccountContext/Sources/PresentationCallManager.swift
index 0f179f4dc5..2f9bbae345 100644
--- a/submodules/AccountContext/Sources/PresentationCallManager.swift
+++ b/submodules/AccountContext/Sources/PresentationCallManager.swift
@@ -47,7 +47,8 @@ public protocol PresentationCall: class {
     func setCurrentAudioOutput(_ output: AudioSessionOutput)
     func debugInfo() -> Signal<(String, String), NoError>
     
-    func getVideoView(completion: @escaping (UIView?) -> Void)
+    func makeIncomingVideoView(completion: @escaping (UIView?) -> Void)
+    func makeOutgoingVideoView(completion: @escaping (UIView?) -> Void)
 }
 
 public protocol PresentationCallManager: class {
diff --git a/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGModernConversationInputMicButton.h b/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGModernConversationInputMicButton.h
index 51de64059c..8f0c4434de 100644
--- a/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGModernConversationInputMicButton.h
+++ b/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGModernConversationInputMicButton.h
@@ -12,6 +12,7 @@
 
 - (void)updateLevel:(CGFloat)level;
 - (void)tick:(CGFloat)level;
+- (void)setColor:(UIColor *)color;
 
 @end
 
diff --git a/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h b/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h
index 0485923d6f..eb81ffdcd6 100644
--- a/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h
+++ b/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h
@@ -19,6 +19,7 @@
 @property (nonatomic, copy) id (^requestActivityHolder)();
 
 @property (nonatomic, copy) void (^micLevel)(CGFloat level);
+@property (nonatomic, copy) void (^onDuration)(NSTimeInterval duration);
 @property (nonatomic, copy) void(^finishedWithVideo)(NSURL *videoURL, UIImage *previewImage, NSUInteger fileSize, NSTimeInterval duration, CGSize dimensions, id liveUploadData, TGVideoEditAdjustments *adjustments, bool, int32_t);
 @property (nonatomic, copy) void(^onDismiss)(bool isAuto);
 @property (nonatomic, copy) void(^onStop)(void);
diff --git a/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m b/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m
index f1b4c4f538..89f2dd9983 100644
--- a/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m
+++ b/submodules/LegacyComponents/Sources/TGModernConversationInputMicButton.m
@@ -343,6 +343,7 @@ static const CGFloat outerCircleMinScale = innerCircleRadius / outerCircleRadius
             [_lock updateLockness:0.0];
         }
         
+        _currentScale = 1.0;
         _animatedIn = true;
         _animationStartTime = CACurrentMediaTime();
         
@@ -557,6 +558,7 @@ static const CGFloat outerCircleMinScale = innerCircleRadius / outerCircleRadius
     
     _previousIcon = _innerIconView.image;
     [self setIcon:TGTintedImage(TGComponentsImageNamed(@"RecordSendIcon"), _pallete != nil ? _pallete.iconColor : [UIColor whiteColor])];
+    _currentScale = 1;
     _cancelTranslation = 0;
     id<TGModernConversationInputMicButtonDelegate> delegate = _delegate;
     if ([delegate respondsToSelector:@selector(micButtonInteractionUpdateCancelTranslation:)])
diff --git a/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m b/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m
index df6afebb43..cb36c397d6 100644
--- a/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m
+++ b/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m
@@ -687,6 +687,9 @@ typedef enum
     if (!_capturePipeline.isRecording)
         return false;
     
+    if (_capturePipeline.videoDuration < 0.33)
+        return false;
+    
     if ([self.view.window isKindOfClass:[TGVideoMessageCaptureControllerWindow class]]) {
         ((TGVideoMessageCaptureControllerWindow *)self.view.window).locked = false;
     }
@@ -1049,6 +1052,7 @@ typedef enum
 {
     [_controlsView recordingStarted];
     [_controlsView setDurationString:@"0:00,00"];
+    self.onDuration(0);
     
     _audioRecordingDurationSeconds = 0;
     _audioRecordingDurationMilliseconds = 0.0;
@@ -1082,6 +1086,7 @@ typedef enum
     }
     else
     {
+        self.onDuration(recordingDuration);
         _audioRecordingDurationSeconds = currentDurationSeconds;
         _audioRecordingDurationMilliseconds = currentDurationMilliseconds;
         [_controlsView setDurationString:[[NSString alloc] initWithFormat:@"%d:%02d,%02d", (int)_audioRecordingDurationSeconds / 60, (int)_audioRecordingDurationSeconds % 60, (int)_audioRecordingDurationMilliseconds]];
diff --git a/submodules/LegacyComponents/Sources/TGVideoMessageControls.m b/submodules/LegacyComponents/Sources/TGVideoMessageControls.m
index 4062339754..c4b6fc917c 100644
--- a/submodules/LegacyComponents/Sources/TGVideoMessageControls.m
+++ b/submodules/LegacyComponents/Sources/TGVideoMessageControls.m
@@ -160,7 +160,7 @@ static CGRect viewFrame(UIView *view)
     CGRect slideToCancelArrowFrame = viewFrame(_slideToCancelArrow);
     setViewFrame(_slideToCancelArrow, CGRectMake(CGFloor((self.frame.size.width - _slideToCancelLabel.frame.size.width) / 2.0f) - slideToCancelArrowFrame.size.width - 7.0f, CGFloor((self.frame.size.height - _slideToCancelLabel.frame.size.height) / 2.0f), slideToCancelArrowFrame.size.width, slideToCancelArrowFrame.size.height));
     _slideToCancelArrow.alpha = 0.0f;
-    [self addSubview:_slideToCancelArrow];
+//    [self addSubview:_slideToCancelArrow];
     
     _slideToCancelArrow.transform = CGAffineTransformMakeTranslation(hideLeftOffset, 0.0f);
     _slideToCancelLabel.transform = CGAffineTransformMakeTranslation(hideLeftOffset, 0.0f);
@@ -185,11 +185,11 @@ static CGRect viewFrame(UIView *view)
         _recordDurationLabel.text = @"0:00,00";
         
         if (_recordIndicatorView.superview == nil)
-            [self addSubview:_recordIndicatorView];
+//            [self addSubview:_recordIndicatorView];
         [_recordIndicatorView.layer removeAllAnimations];
         
         if (_recordDurationLabel.superview == nil)
-            [self addSubview:_recordDurationLabel];
+//            [self addSubview:_recordDurationLabel];
         [_recordDurationLabel.layer removeAllAnimations];
         
         _slideToCancelArrow.transform = CGAffineTransformMakeTranslation(300.0f, 0.0f);
@@ -211,7 +211,7 @@ static CGRect viewFrame(UIView *view)
     if (!isAlreadyLocked)
     {
         if (_slideToCancelLabel.superview == nil)
-            [self addSubview:_slideToCancelLabel];
+//            [self addSubview:_slideToCancelLabel];
         
         [UIView animateWithDuration:0.18 delay:0.0 options:animationCurveOption animations:^
         {
@@ -445,8 +445,7 @@ static CGRect viewFrame(UIView *view)
         [UIView animateWithDuration:0.2 delay:0.0 options:UIViewAnimationOptionBeginFromCurrentState | animationCurveOption animations:^
         {
-            CGAffineTransform transform = CGAffineTransformMakeTranslation(0.0f, -22.0f);
-            transform = CGAffineTransformScale(transform, 0.25f, 0.25f);
+            CGAffineTransform transform = CGAffineTransformMakeScale(0.25f, 0.25f);
             _cancelButton.transform = transform;
             _cancelButton.alpha = 0.0f;
         } completion:nil];
diff --git a/submodules/Postbox/Sources/MediaBox.swift b/submodules/Postbox/Sources/MediaBox.swift
index 6efe9a4bb3..79322c4e28 100644
--- a/submodules/Postbox/Sources/MediaBox.swift
+++ b/submodules/Postbox/Sources/MediaBox.swift
@@ -266,6 +266,18 @@ public final class MediaBox {
         }
     }
     
+    public func copyResourceData(from: MediaResourceId, to: MediaResourceId) {
+        if from.isEqual(to: to) {
+            return
+        }
+        self.dataQueue.async {
+            let pathsFrom = self.storePathsForId(from)
+            let pathsTo = self.storePathsForId(to)
+            let _ = try? FileManager.default.copyItem(atPath: pathsFrom.partial, toPath: pathsTo.partial)
+            let _ = try? FileManager.default.copyItem(atPath: pathsFrom.complete, toPath: pathsTo.complete)
+        }
+    }
+    
     private func maybeCopiedPreFetchedResource(completePath: String, resource: MediaResource) {
         if let path = self.preFetchedResourcePath(resource) {
             let _ = try? FileManager.default.copyItem(atPath: path, toPath: completePath)
diff --git a/submodules/TelegramCallsUI/Sources/CallController.swift b/submodules/TelegramCallsUI/Sources/CallController.swift
index 35ac70f756..fd72e4c061 100644
--- a/submodules/TelegramCallsUI/Sources/CallController.swift
+++ b/submodules/TelegramCallsUI/Sources/CallController.swift
@@ -45,6 +45,8 @@ public final class CallController: ViewController {
     private var audioOutputStateDisposable: Disposable?
     private var audioOutputState: ([AudioSessionOutput], AudioSessionOutput?)?
     
+    private let idleTimerExtensionDisposable = MetaDisposable()
+    
     public init(sharedContext: SharedAccountContext, account: Account, call: PresentationCall, easyDebugAccess: Bool) {
         self.sharedContext = sharedContext
         self.account = account
@@ -97,6 +99,7 @@ public final class CallController: ViewController {
         self.disposable?.dispose()
         self.callMutedDisposable?.dispose()
         self.audioOutputStateDisposable?.dispose()
+        self.idleTimerExtensionDisposable.dispose()
     }
     
     private func callStateUpdated(_ callState: PresentationCallState) {
@@ -260,6 +263,14 @@ public final class CallController: ViewController {
             
             self.controllerNode.animateIn()
         }
+        
+        self.idleTimerExtensionDisposable.set(self.sharedContext.applicationBindings.pushIdleTimerExtension())
+    }
+    
+    override public func viewDidDisappear(_ animated: Bool) {
+        super.viewDidDisappear(animated)
+        
+        self.idleTimerExtensionDisposable.set(nil)
     }
     
     override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) {
diff --git a/submodules/TelegramCallsUI/Sources/CallControllerNode.swift b/submodules/TelegramCallsUI/Sources/CallControllerNode.swift
index 13dfb0591e..edd39bbb99 100644
--- a/submodules/TelegramCallsUI/Sources/CallControllerNode.swift
+++ b/submodules/TelegramCallsUI/Sources/CallControllerNode.swift
@@ -31,8 +31,9 @@ final class CallControllerNode: ASDisplayNode {
     private let imageNode: TransformImageNode
     private let dimNode: ASDisplayNode
-    private var videoView: UIView?
-    private var videoViewRequested: Bool = false
+    private var incomingVideoView: UIView?
+    private var outgoingVideoView: UIView?
+    private var videoViewsRequested: Bool = false
     private let backButtonArrowNode: ASImageNode
     private let backButtonNode: HighlightableButtonNode
     private let statusNode: CallControllerStatusNode
@@ -265,16 +266,37 @@ final class CallControllerNode: ASDisplayNode {
                 }
             }
             statusReception = reception
-            if !self.videoViewRequested {
-                self.videoViewRequested = true
-                self.call.getVideoView(completion: { [weak self] videoView in
+            if !self.videoViewsRequested {
+                self.videoViewsRequested = true
+                self.call.makeIncomingVideoView(completion: { [weak self] incomingVideoView in
                     guard let strongSelf = self else {
                         return
                     }
-                    if let videoView = videoView {
+                    if let incomingVideoView = incomingVideoView {
                         strongSelf.setCurrentAudioOutput?(.speaker)
-                        strongSelf.videoView = videoView
-                        strongSelf.containerNode.view.insertSubview(videoView, aboveSubview: strongSelf.dimNode.view)
+                        strongSelf.incomingVideoView = incomingVideoView
+                        strongSelf.containerNode.view.insertSubview(incomingVideoView, aboveSubview: strongSelf.dimNode.view)
+                        if let (layout, navigationBarHeight) = strongSelf.validLayout {
+                            strongSelf.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
+                        }
+                    }
+                })
+                
+                self.call.makeOutgoingVideoView(completion: { [weak self] outgoingVideoView in
+                    guard let strongSelf = self else {
+                        return
+                    }
+                    if let outgoingVideoView = outgoingVideoView {
+                        outgoingVideoView.backgroundColor = .black
+                        outgoingVideoView.clipsToBounds = true
+                        outgoingVideoView.layer.cornerRadius = 16.0
+                        strongSelf.setCurrentAudioOutput?(.speaker)
+                        strongSelf.outgoingVideoView = outgoingVideoView
+                        if let incomingVideoView = strongSelf.incomingVideoView {
+                            strongSelf.containerNode.view.insertSubview(outgoingVideoView, aboveSubview: incomingVideoView)
+                        } else {
+                            strongSelf.containerNode.view.insertSubview(outgoingVideoView, aboveSubview: strongSelf.dimNode.view)
+                        }
                         if let (layout, navigationBarHeight) = strongSelf.validLayout {
                             strongSelf.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: .immediate)
                         }
@@ -388,10 +410,6 @@ final class CallControllerNode: ASDisplayNode {
         transition.updateFrame(node: self.containerNode, frame: CGRect(origin: CGPoint(), size: layout.size))
         transition.updateFrame(node: self.dimNode, frame: CGRect(origin: CGPoint(), size: layout.size))
         
-        if let videoView = self.videoView {
-            videoView.frame = CGRect(origin: CGPoint(), size: layout.size)
-        }
-        
         if let keyPreviewNode = self.keyPreviewNode {
             transition.updateFrame(node: keyPreviewNode, frame: CGRect(origin: CGPoint(), size: layout.size))
             keyPreviewNode.updateLayout(size: layout.size, transition: .immediate)
@@ -445,7 +463,16 @@ final class CallControllerNode: ASDisplayNode {
         transition.updateFrame(node: self.statusNode, frame: CGRect(origin: CGPoint(x: 0.0, y: statusOffset), size: CGSize(width: layout.size.width, height: statusHeight)))
         
         self.buttonsNode.updateLayout(constrainedWidth: layout.size.width, transition: transition)
-        transition.updateFrame(node: self.buttonsNode, frame: CGRect(origin: CGPoint(x: 0.0, y: layout.size.height - (buttonsOffset - 40.0) - buttonsHeight - layout.intrinsicInsets.bottom), size: CGSize(width: layout.size.width, height: buttonsHeight)))
+        let buttonsOriginY: CGFloat = layout.size.height - (buttonsOffset - 40.0) - buttonsHeight - layout.intrinsicInsets.bottom
+        transition.updateFrame(node: self.buttonsNode, frame: CGRect(origin: CGPoint(x: 0.0, y: buttonsOriginY), size: CGSize(width: layout.size.width, height: buttonsHeight)))
+        
+        if let incomingVideoView = self.incomingVideoView {
+            incomingVideoView.frame = CGRect(origin: CGPoint(), size: layout.size)
+        }
+        if let outgoingVideoView = self.outgoingVideoView {
+            let outgoingSize = layout.size.aspectFitted(CGSize(width: 200.0, height: 200.0))
+            outgoingVideoView.frame = CGRect(origin: CGPoint(x: layout.size.width - 16.0 - outgoingSize.width, y: buttonsOriginY - 32.0 - outgoingSize.height), size: outgoingSize)
+        }
         
         let keyTextSize = self.keyButtonNode.frame.size
         transition.updateFrame(node: self.keyButtonNode, frame: CGRect(origin: CGPoint(x: layout.size.width - keyTextSize.width - 8.0, y: navigationOffset + 8.0), size: keyTextSize))
@@ -462,7 +489,8 @@ final class CallControllerNode: ASDisplayNode {
                     self?.backPressed()
                 }
             })
-            self.containerNode.insertSubnode(keyPreviewNode, aboveSubnode: self.dimNode)
+            
+            self.containerNode.insertSubnode(keyPreviewNode, belowSubnode: self.statusNode)
             self.keyPreviewNode = keyPreviewNode
             
             if let (validLayout, _) = self.validLayout {
diff --git a/submodules/TelegramCallsUI/Sources/PresentationCall.swift b/submodules/TelegramCallsUI/Sources/PresentationCall.swift
index 24aac90b50..551e24666b 100644
--- a/submodules/TelegramCallsUI/Sources/PresentationCall.swift
+++ b/submodules/TelegramCallsUI/Sources/PresentationCall.swift
@@ -673,7 +673,11 @@ public final class PresentationCallImpl: PresentationCall {
         return self.debugInfoValue.get()
     }
     
-    public func getVideoView(completion: @escaping (UIView?) -> Void) {
-        self.ongoingContext?.getVideoView(completion: completion)
+    public func makeIncomingVideoView(completion: @escaping (UIView?) -> Void) {
+        self.ongoingContext?.makeIncomingVideoView(completion: completion)
+    }
+    
+    public func makeOutgoingVideoView(completion: @escaping (UIView?) -> Void) {
+        self.ongoingContext?.makeOutgoingVideoView(completion: completion)
     }
 }
diff --git a/submodules/TelegramCore/Sources/ApplyUpdateMessage.swift b/submodules/TelegramCore/Sources/ApplyUpdateMessage.swift
index 44bc111807..ebabf4ab90 100644
--- a/submodules/TelegramCore/Sources/ApplyUpdateMessage.swift
+++ b/submodules/TelegramCore/Sources/ApplyUpdateMessage.swift
@@ -5,6 +5,14 @@ import SwiftSignalKit
 import SyncCore
 
+private func copyOrMoveResourceData(from fromResource: MediaResource, to toResource: MediaResource, mediaBox: MediaBox) {
+    if fromResource is CloudFileMediaResource || fromResource is CloudDocumentMediaResource || fromResource is SecretFileMediaResource {
+        mediaBox.copyResourceData(from: fromResource.id, to: toResource.id)
+    } else {
+        mediaBox.moveResourceData(from: fromResource.id, to: toResource.id)
+    }
+}
+
 func applyMediaResourceChanges(from: Media, to: Media, postbox: Postbox, force: Bool) {
     if let fromImage = from as? TelegramMediaImage, let toImage = to as? TelegramMediaImage {
         let fromSmallestRepresentation = smallestImageRepresentation(fromImage.representations)
@@ -13,21 +21,21 @@ func applyMediaResourceChanges(from: Media, to: Media, postbox: Postbox, force:
             let widthDifference = fromSmallestRepresentation.dimensions.width - toSmallestRepresentation.dimensions.width
             let heightDifference = fromSmallestRepresentation.dimensions.height - toSmallestRepresentation.dimensions.height
             if abs(widthDifference) < leeway && abs(heightDifference) < leeway {
-                postbox.mediaBox.moveResourceData(from: fromSmallestRepresentation.resource.id, to: toSmallestRepresentation.resource.id)
+                copyOrMoveResourceData(from: fromSmallestRepresentation.resource, to: toSmallestRepresentation.resource, mediaBox: postbox.mediaBox)
             }
         }
         if let fromLargestRepresentation = largestImageRepresentation(fromImage.representations), let toLargestRepresentation = largestImageRepresentation(toImage.representations) {
-            postbox.mediaBox.moveResourceData(from: fromLargestRepresentation.resource.id, to: toLargestRepresentation.resource.id)
+            copyOrMoveResourceData(from: fromLargestRepresentation.resource, to: toLargestRepresentation.resource, mediaBox: postbox.mediaBox)
         }
     } else if let fromFile = from as? TelegramMediaFile, let toFile = to as? TelegramMediaFile {
         if let fromPreview = smallestImageRepresentation(fromFile.previewRepresentations), let toPreview = smallestImageRepresentation(toFile.previewRepresentations) {
-            postbox.mediaBox.moveResourceData(from: fromPreview.resource.id, to: toPreview.resource.id)
+            copyOrMoveResourceData(from: fromPreview.resource, to: toPreview.resource, mediaBox: postbox.mediaBox)
         }
         if let fromVideoThumbnail = fromFile.videoThumbnails.first, let toVideoThumbnail = toFile.videoThumbnails.first, fromVideoThumbnail.resource.id.uniqueId != toVideoThumbnail.resource.id.uniqueId {
-            postbox.mediaBox.moveResourceData(from: fromVideoThumbnail.resource.id, to: toVideoThumbnail.resource.id)
+            copyOrMoveResourceData(from: fromVideoThumbnail.resource, to: toVideoThumbnail.resource, mediaBox: postbox.mediaBox)
         }
         if (force || fromFile.size == toFile.size || fromFile.resource.size == toFile.resource.size) && fromFile.mimeType == toFile.mimeType {
-            postbox.mediaBox.moveResourceData(from: fromFile.resource.id, to: toFile.resource.id)
+            copyOrMoveResourceData(from: fromFile.resource, to: toFile.resource, mediaBox: postbox.mediaBox)
         }
     }
 }
diff --git a/submodules/TelegramCore/Sources/MultipartUpload.swift b/submodules/TelegramCore/Sources/MultipartUpload.swift
index ac1246a68b..c4f2b4cb57 100644
--- a/submodules/TelegramCore/Sources/MultipartUpload.swift
+++ b/submodules/TelegramCore/Sources/MultipartUpload.swift
@@ -152,11 +152,11 @@ private final class MultipartUploadManager {
         self.progress = progress
         self.completed = completed
         
-        if headerSize == 0 {
+        //if headerSize == 0 {
             self.headerPartState = .ready
-        } else {
+        /*} else {
             self.headerPartState = .notStarted
-        }
+        }*/
         
         if let hintFileSize = hintFileSize, hintFileSize > 10 * 1024 * 1024 {
             self.defaultPartSize = 512 * 1024
diff --git a/submodules/TelegramCore/Sources/PeerPhotoUpdater.swift b/submodules/TelegramCore/Sources/PeerPhotoUpdater.swift
index bec3626d69..30b61bfaf3 100644
--- a/submodules/TelegramCore/Sources/PeerPhotoUpdater.swift
+++ b/submodules/TelegramCore/Sources/PeerPhotoUpdater.swift
@@ -71,7 +71,7 @@ public func updatePeerPhotoInternal(postbox: Postbox, network: Network, stateMan
             if let video = video {
                 mappedVideo = video
                 |> take(until: { value in
-                    if case let .result(resultData) = value?.content, case .inputFile = resultData {
+                    if case let .result(resultData)? = value?.content, case .inputFile = resultData {
                         return SignalTakeAction(passthrough: true, complete: true)
                     } else {
                         return SignalTakeAction(passthrough: true, complete: false)
diff --git a/submodules/TelegramCore/Sources/PendingMessageUploadedContent.swift b/submodules/TelegramCore/Sources/PendingMessageUploadedContent.swift
index 16b490b314..6d248bfa43 100644
--- a/submodules/TelegramCore/Sources/PendingMessageUploadedContent.swift
+++ b/submodules/TelegramCore/Sources/PendingMessageUploadedContent.swift
@@ -662,7 +662,7 @@ private func uploadedMediaFileContent(network: Network, postbox: Postbox, auxili
                 return .single(.pending)
             case let .done(media):
                 if let media = media as? TelegramMediaFile, let smallestThumbnail = smallestImageRepresentation(media.previewRepresentations) {
-                    if peerId.namespace == Namespaces.Peer.SecretChat {
+                    if peerId.namespace == Namespaces.Peer.SecretChat || (smallestThumbnail.resource is LocalFileMediaResource) {
                         return .single(.done(media, .none))
                     } else {
                         let fileReference: AnyMediaReference
diff --git a/submodules/TelegramUI/Sources/BlobView.swift b/submodules/TelegramUI/Sources/BlobView.swift
index d4af62730a..38e831f16d 100644
--- a/submodules/TelegramUI/Sources/BlobView.swift
+++ b/submodules/TelegramUI/Sources/BlobView.swift
@@ -13,32 +13,32 @@ final class VoiceBlobView: UIView, TGModernConversationInputMicButtonDecoration
     private let smallBlob = BlobView(
         pointsCount: 8,
         minRandomness: 0.1,
-        maxRandomness: 1,
+        maxRandomness: 0.5,
         minSpeed: 0.2,
-        maxSpeed: 1,
-        minScale: 0.56,
-        maxScale: 0.56,
-        scaleSpeed: 0
+        maxSpeed: 0.6,
+        minScale: 0.45,
+        maxScale: 0.55,
+        scaleSpeed: 0.2
     )
     private let mediumBlob = BlobView(
         pointsCount: 8,
         minRandomness: 1,
-        maxRandomness: 2,
+        maxRandomness: 1,
         minSpeed: 3,
-        maxSpeed: 8,
-        minScale: 0.67,
+        maxSpeed: 7,
+        minScale: 0.55,
         maxScale: 0.9,
-        scaleSpeed: 0.1
+        scaleSpeed: 0.2
     )
     private let bigBlob = BlobView(
         pointsCount: 8,
         minRandomness: 1,
-        maxRandomness: 2,
+        maxRandomness: 1,
         minSpeed: 3,
-        maxSpeed: 8,
-        minScale: 0.67,
+        maxSpeed: 7,
+        minScale: 0.55,
         maxScale: 1,
-        scaleSpeed: 0.1
+        scaleSpeed: 0.2
     )
     
     override init(frame: CGRect) {
@@ -105,11 +105,16 @@ final class BlobView: UIView {
         didSet {
             speedLevel = max(level, speedLevel)
             scaleLevel = max(level, scaleLevel)
+            
+            if abs(scaleLevel - lastScaleLevel) > 0.4 {
+                animateToNewScale()
+            }
         }
     }
     
     private var speedLevel: CGFloat = 0
     private var scaleLevel: CGFloat = 0
+    private var lastScaleLevel: CGFloat = 0
     
     private let shapeLayer: CAShapeLayer = {
         let layer = CAShapeLayer()
@@ -183,12 +188,15 @@ final class BlobView: UIView {
     }
     
     func animateToNewScale() {
+        let isDownscale = lastScaleLevel > scaleLevel
+        lastScaleLevel = scaleLevel
+        
         shapeLayer.pop_removeAnimation(forKey: "scale")
         
         let currentScale = minScale + (maxScale - minScale) * scaleLevel
         let scaleAnimation = POPBasicAnimation(propertyNamed: kPOPLayerScaleXY)!
         scaleAnimation.toValue = CGPoint(x: currentScale, y: currentScale)
-        scaleAnimation.duration = CFTimeInterval(scaleSpeed)
+        scaleAnimation.duration = isDownscale ? 0.45 : CFTimeInterval(scaleSpeed)
        scaleAnimation.completionBlock = { [weak self] animation, finished in
             if finished {
                 self?.animateToNewScale()
diff --git a/submodules/TelegramUI/Sources/ChatController.swift b/submodules/TelegramUI/Sources/ChatController.swift
index 6fc380e129..16e72f1c9d 100644
--- a/submodules/TelegramUI/Sources/ChatController.swift
+++ b/submodules/TelegramUI/Sources/ChatController.swift
@@ -2564,6 +2564,8 @@ public final class ChatControllerImpl: TelegramBaseController, ChatController, G
                     videoRecorder.onDismiss = {
                         if let strongSelf = self {
+                            strongSelf.beginMediaRecordingRequestId += 1
+                            strongSelf.lockMediaRecordingRequestId = nil
                             strongSelf.videoRecorder.set(.single(nil))
                         }
                     }
@@ -7296,9 +7298,13 @@ public final class ChatControllerImpl: TelegramBaseController, ChatController, G
             self.audioRecorder.set(.single(nil))
         } else if let videoRecorderValue = self.videoRecorderValue {
             if case .send = updatedAction {
+                self.chatDisplayNode.updateRecordedMediaDeleted(false)
                 videoRecorderValue.completeVideo()
                 self.videoRecorder.set(.single(nil))
             } else {
+                if case .dismiss = updatedAction {
+                    self.chatDisplayNode.updateRecordedMediaDeleted(true)
+                }
                 if case .preview = updatedAction, videoRecorderValue.stopVideo() {
                     self.updateChatPresentationInterfaceState(animated: true, interactive: true, {
                         $0.updatedInputTextPanelState { panelState in
diff --git a/submodules/TelegramUI/Sources/ChatInterfaceStateContextMenus.swift b/submodules/TelegramUI/Sources/ChatInterfaceStateContextMenus.swift
index dce400b2e5..5166202b89 100644
--- a/submodules/TelegramUI/Sources/ChatInterfaceStateContextMenus.swift
+++ b/submodules/TelegramUI/Sources/ChatInterfaceStateContextMenus.swift
@@ -409,12 +409,34 @@ func contextMenuForChatPresentationIntefaceState(chatPresentationInterfaceState:
         if let action = media as? TelegramMediaAction, case let .phoneCall(id, discardReason, _) = action.action {
             if discardReason != .busy && discardReason != .missed {
                 if let logName = callLogNameForId(id: id, account: context.account) {
+                    let logsPath = callLogsPath(account: context.account)
+                    let logPath = logsPath + "/" + logName
                     let start = logName.index(logName.startIndex, offsetBy: "\(id)".count + 1)
                     let end = logName.index(logName.endIndex, offsetBy: -4)
                     let accessHash = logName[start..<end]
[...]
diff --git a/submodules/TelegramUI/Sources/ChatTextInputMediaRecordingButton.swift b/submodules/TelegramUI/Sources/ChatTextInputMediaRecordingButton.swift
[...]
 Void) {
         self.theme = theme
         self.strings = strings
@@ -317,6 +329,8 @@
         }
         
         self.pallete = legacyInputMicPalette(from: theme)
+        self.micDecoration.setColor(self.theme.chat.inputPanel.actionControlFillColor)
+        (self.micLock as? LockView)?.updateTheme(theme)
     }
     
     deinit {
@@ -397,15 +411,11 @@
     }
     
     func micButtonDecoration() -> (UIView & TGModernConversationInputMicButtonDecoration)! {
-        let blobView = VoiceBlobView(frame: CGRect(origin: CGPoint(), size: CGSize(width: 180.0, height: 180.0)))
-        blobView.setColor(self.theme.chat.inputPanel.actionControlFillColor)
-        return blobView
+        return micDecoration
     }
     
     func micButtonLock() -> (UIView & TGModernConversationInputMicButtonLock)! {
-        let lockView = LockView(frame: CGRect(origin: CGPoint(), size: CGSize(width: 40.0, height: 60.0)), theme: self.theme, strings: self.strings)
-        lockView.addTarget(self, action: #selector(handleStopTap), for: .touchUpInside)
-        return lockView
+        return micLock
     }
     
     @objc private func handleStopTap() {
diff --git a/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift b/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
index 4f5421fc17..6647a84968 100644
--- a/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
+++ b/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
@@ -908,6 +908,68 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate {
         var audioRecordingItemsAlpha: CGFloat = 1
         if let mediaRecordingState = interfaceState.inputTextPanelState.mediaRecordingState {
             audioRecordingItemsAlpha = 0
+            
+            let audioRecordingInfoContainerNode: ASDisplayNode
+            if let currentAudioRecordingInfoContainerNode = self.audioRecordingInfoContainerNode {
+                audioRecordingInfoContainerNode = currentAudioRecordingInfoContainerNode
+            } else {
+                audioRecordingInfoContainerNode = ASDisplayNode()
+                self.audioRecordingInfoContainerNode = audioRecordingInfoContainerNode
+                self.insertSubnode(audioRecordingInfoContainerNode, at: 0)
+            }
+            
+            var animateTimeSlideIn = false
+            let audioRecordingTimeNode: ChatTextInputAudioRecordingTimeNode
+            if let currentAudioRecordingTimeNode = self.audioRecordingTimeNode {
+                audioRecordingTimeNode = currentAudioRecordingTimeNode
+            } else {
+                audioRecordingTimeNode = ChatTextInputAudioRecordingTimeNode(theme: interfaceState.theme)
+                self.audioRecordingTimeNode = audioRecordingTimeNode
+                audioRecordingInfoContainerNode.addSubnode(audioRecordingTimeNode)
+                
+                if transition.isAnimated {
+                    animateTimeSlideIn = true
+                }
+            }
+            
+            
+            var animateCancelSlideIn = false
+            let audioRecordingCancelIndicator: ChatTextInputAudioRecordingCancelIndicator
+            if let currentAudioRecordingCancelIndicator = self.audioRecordingCancelIndicator {
+                audioRecordingCancelIndicator = currentAudioRecordingCancelIndicator
+            } else {
+                animateCancelSlideIn = transition.isAnimated
+                
+                audioRecordingCancelIndicator = ChatTextInputAudioRecordingCancelIndicator(theme: interfaceState.theme, strings: interfaceState.strings, cancel: { [weak self] in
+                    self?.interfaceInteraction?.finishMediaRecording(.dismiss)
+                })
+                self.audioRecordingCancelIndicator = audioRecordingCancelIndicator
+                self.insertSubnode(audioRecordingCancelIndicator, at: 0)
+            }
+            
+            let isLocked = mediaRecordingState.isLocked
+            var hideInfo = false
+            
+            switch mediaRecordingState {
+            case let .audio(recorder, _):
+                self.actionButtons.micButton.audioRecorder = recorder
+                audioRecordingTimeNode.audioRecorder = recorder
+            case let .video(status, _):
+                switch status {
+                case let .recording(recordingStatus):
+                    audioRecordingTimeNode.videoRecordingStatus = recordingStatus
+                    self.actionButtons.micButton.videoRecordingStatus = recordingStatus
+                    if isLocked {
+                        audioRecordingCancelIndicator.layer.animateAlpha(from: audioRecordingCancelIndicator.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                    }
+                case .editing:
+                    audioRecordingTimeNode.videoRecordingStatus = nil
+                    self.actionButtons.micButton.videoRecordingStatus = nil
+                    hideMicButton = true
+                    hideInfo = true
+                }
+            }
+            
             transition.updateAlpha(layer: self.textInputBackgroundNode.layer, alpha: 0.0)
             if let textInputNode = self.textInputNode {
                 transition.updateAlpha(node: textInputNode, alpha: 0.0)
@@ -916,150 +978,105 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate {
                 transition.updateAlpha(layer: button.layer, alpha: 0.0)
             }
             
-            switch mediaRecordingState {
-            case let .audio(recorder, isLocked):
-                self.actionButtons.micButton.audioRecorder = recorder
-                let audioRecordingInfoContainerNode: ASDisplayNode
-                if let currentAudioRecordingInfoContainerNode = self.audioRecordingInfoContainerNode {
-                    audioRecordingInfoContainerNode = currentAudioRecordingInfoContainerNode
-                } else {
-                    audioRecordingInfoContainerNode = ASDisplayNode()
-                    self.audioRecordingInfoContainerNode = audioRecordingInfoContainerNode
-                    self.insertSubnode(audioRecordingInfoContainerNode, at: 0)
-                }
-                
-                var animateCancelSlideIn = false
-                let audioRecordingCancelIndicator: ChatTextInputAudioRecordingCancelIndicator
-                if let currentAudioRecordingCancelIndicator = self.audioRecordingCancelIndicator {
-                    audioRecordingCancelIndicator = currentAudioRecordingCancelIndicator
-                } else {
-                    animateCancelSlideIn = transition.isAnimated
+            let cancelTransformThreshold: CGFloat = 8.0
+            
+            let indicatorTranslation = max(0.0, self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold)
+            
+            audioRecordingCancelIndicator.frame = CGRect(
+                origin: CGPoint(
+                    x: leftInset + floor((baseWidth - audioRecordingCancelIndicator.bounds.size.width - indicatorTranslation) / 2.0),
+                    y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingCancelIndicator.bounds.size.height) / 2.0)),
+                size: audioRecordingCancelIndicator.bounds.size)
+            if self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
+                let progress = 1 - (self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold) / 80
+                audioRecordingCancelIndicator.alpha = progress
+            } else {
+                audioRecordingCancelIndicator.alpha = 1
+            }
+            
+            if animateCancelSlideIn {
+                let position = audioRecordingCancelIndicator.layer.position
+                audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: width + audioRecordingCancelIndicator.bounds.size.width, y: position.y), to: position, duration: 0.4, timingFunction: kCAMediaTimingFunctionSpring)
+            }
+            
+            audioRecordingCancelIndicator.updateIsDisplayingCancel(isLocked, animated: !animateCancelSlideIn)
+            
+            if isLocked || self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
+                var deltaOffset: CGFloat = 0.0
+                if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") != nil, let presentationLayer = audioRecordingCancelIndicator.layer.presentation() {
+                    let translation = CGPoint(x: presentationLayer.transform.m41, y: presentationLayer.transform.m42)
+                    deltaOffset = translation.x
+                }
+                audioRecordingCancelIndicator.layer.removeAnimation(forKey: "slide_juggle")
+                if !deltaOffset.isZero {
+                    audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: deltaOffset, y: 0.0), to: CGPoint(), duration: 0.3, additive: true)
+                }
+            } else if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") == nil {
+                let slideJuggleAnimation = CABasicAnimation(keyPath: "transform")
+                slideJuggleAnimation.toValue = CATransform3DMakeTranslation(-6, 0, 0)
+                slideJuggleAnimation.duration = 1
+                slideJuggleAnimation.timingFunction = CAMediaTimingFunction(name: CAMediaTimingFunctionName.easeInEaseOut)
+                slideJuggleAnimation.autoreverses = true
+                slideJuggleAnimation.repeatCount = Float.infinity
+                audioRecordingCancelIndicator.layer.add(slideJuggleAnimation, forKey: "slide_juggle")
+            }
+            
+            let audioRecordingTimeSize = audioRecordingTimeNode.measure(CGSize(width: 200.0, height: 100.0))
+            
+            let cancelMinX = audioRecordingCancelIndicator.alpha > 0.5 ? audioRecordingCancelIndicator.frame.minX : width
+            
+            audioRecordingInfoContainerNode.frame = CGRect(
+                origin: CGPoint(
+                    x: min(leftInset, cancelMinX - audioRecordingTimeSize.width - 8.0 - 28.0),
+                    y: 0.0
+                ),
+                size: CGSize(width: baseWidth, height: panelHeight)
+            )
+            
+            audioRecordingTimeNode.frame = CGRect(origin: CGPoint(x: 40.0, y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingTimeSize.height) / 2.0)), size: audioRecordingTimeSize)
+            if animateTimeSlideIn {
+                let position = audioRecordingTimeNode.layer.position
+                audioRecordingTimeNode.layer.animatePosition(from: CGPoint(x: position.x - 10.0, y: position.y), to: position, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
+                audioRecordingTimeNode.layer.animateAlpha(from: 0, to: 1, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
+            }
+            
+            var animateDotAppearing = false
+            let audioRecordingDotNode: AnimationNode
+            if let currentAudioRecordingDotNode = self.audioRecordingDotNode, !currentAudioRecordingDotNode.played {
+                audioRecordingDotNode = currentAudioRecordingDotNode
+            } else {
+                self.audioRecordingDotNode?.removeFromSupernode()
+                audioRecordingDotNode = AnimationNode(animation: "voicebin")
+                self.audioRecordingDotNode = audioRecordingDotNode
+                self.addSubnode(audioRecordingDotNode)
+            }
+            
+            animateDotAppearing = transition.isAnimated && !isLocked && !hideInfo
+            
+            audioRecordingDotNode.frame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: panelHeight - 44 + 1), size: CGSize(width: 40.0, height: 40))
+            if animateDotAppearing {
+                audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
+                audioRecordingDotNode.layer.animateAlpha(from: 0, to: 1, duration: 0.15, delay: 0, completion: { [weak audioRecordingDotNode] finished in
+                    if finished {
+                        let animation = CAKeyframeAnimation(keyPath: "opacity")
+                        animation.values = [1.0 as NSNumber, 1.0 as NSNumber, 0.0 as NSNumber]
+                        animation.keyTimes = [0.0 as NSNumber, 0.4546 as NSNumber, 0.9091 as NSNumber, 1 as NSNumber]
+                        animation.duration = 0.5
+                        animation.autoreverses = true
+                        animation.repeatCount = Float.infinity
-                    audioRecordingCancelIndicator = ChatTextInputAudioRecordingCancelIndicator(theme: interfaceState.theme, strings: interfaceState.strings, cancel: { [weak self] in
-                        self?.interfaceInteraction?.finishMediaRecording(.dismiss)
-                    })
-                    self.audioRecordingCancelIndicator = audioRecordingCancelIndicator
-                    self.insertSubnode(audioRecordingCancelIndicator, at: 0)
-                }
-                
-                let cancelTransformThreshold: CGFloat = 8.0
-                
-                let indicatorTranslation = max(0.0, self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold)
-                
-                audioRecordingCancelIndicator.frame = CGRect(
-                    origin: CGPoint(
-                        x: leftInset + floor((baseWidth - audioRecordingCancelIndicator.bounds.size.width - indicatorTranslation) / 2.0),
-                        y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingCancelIndicator.bounds.size.height) / 2.0)),
-                    size: audioRecordingCancelIndicator.bounds.size)
-                if self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
-                    let progress = 1 - (self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold) / 80
-                    audioRecordingCancelIndicator.alpha = progress
-                } else {
-                    audioRecordingCancelIndicator.alpha = 1
-                }
-                
-                if animateCancelSlideIn {
-                    let position = audioRecordingCancelIndicator.layer.position
-                    audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: width + audioRecordingCancelIndicator.bounds.size.width, y: position.y), to: position, duration: 0.4, timingFunction: kCAMediaTimingFunctionSpring)
-                }
-                
-                audioRecordingCancelIndicator.updateIsDisplayingCancel(isLocked, animated: !animateCancelSlideIn)
-                
-                if isLocked || self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
-                    var deltaOffset: CGFloat = 0.0
-                    if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") != nil, let presentationLayer = audioRecordingCancelIndicator.layer.presentation() {
-                        let translation = CGPoint(x: presentationLayer.transform.m41, y: presentationLayer.transform.m42)
-                        deltaOffset = translation.x
-                    }
-                    audioRecordingCancelIndicator.layer.removeAnimation(forKey: "slide_juggle")
-                    if !deltaOffset.isZero {
-                        audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: deltaOffset, y: 0.0), to: CGPoint(), duration: 0.3, additive: true)
-                    }
-                } else if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") == nil {
-                    let slideJuggleAnimation = CABasicAnimation(keyPath: "transform")
-                    slideJuggleAnimation.toValue = CATransform3DMakeTranslation(-6, 0, 0)
-                    slideJuggleAnimation.duration = 1
-                    slideJuggleAnimation.timingFunction = CAMediaTimingFunction(name: CAMediaTimingFunctionName.easeInEaseOut)
-                    slideJuggleAnimation.autoreverses = true
-                    slideJuggleAnimation.repeatCount = Float.infinity
-                    audioRecordingCancelIndicator.layer.add(slideJuggleAnimation, forKey: "slide_juggle")
-                }
-                
-                var animateTimeSlideIn = false
-                let audioRecordingTimeNode: ChatTextInputAudioRecordingTimeNode
-                if let currentAudioRecordingTimeNode = self.audioRecordingTimeNode {
-                    audioRecordingTimeNode = currentAudioRecordingTimeNode
-                } else {
-                    audioRecordingTimeNode = ChatTextInputAudioRecordingTimeNode(theme: interfaceState.theme)
-                    self.audioRecordingTimeNode = audioRecordingTimeNode
-                    audioRecordingInfoContainerNode.addSubnode(audioRecordingTimeNode)
-                    
-                    if transition.isAnimated {
-                        animateTimeSlideIn = true
-                    }
-                }
-                
-                let audioRecordingTimeSize = audioRecordingTimeNode.measure(CGSize(width: 200.0, height: 100.0))
-                
-                let cancelMinX = audioRecordingCancelIndicator.alpha > 0.5 ? audioRecordingCancelIndicator.frame.minX : width
-                
-                audioRecordingInfoContainerNode.frame = CGRect(
-                    origin: CGPoint(
-                        x: min(leftInset, cancelMinX - audioRecordingTimeSize.width - 8.0 - 28.0),
-                        y: 0.0
-                    ),
-                    size: CGSize(width: baseWidth, height: panelHeight)
-                )
-                
-                audioRecordingTimeNode.frame = CGRect(origin: CGPoint(x: 40.0, y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingTimeSize.height) / 2.0)), size: audioRecordingTimeSize)
-                if animateTimeSlideIn {
-                    let position = audioRecordingTimeNode.layer.position
-                    audioRecordingTimeNode.layer.animatePosition(from: CGPoint(x: position.x - 10.0, y: position.y), to: position, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
-                    audioRecordingTimeNode.layer.animateAlpha(from: 0, to: 1, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
-                }
-                
-                audioRecordingTimeNode.audioRecorder = recorder
-                
-                var animateDotAppearing = false
-                let audioRecordingDotNode: AnimationNode
-                if let currentAudioRecordingDotNode = self.audioRecordingDotNode, !currentAudioRecordingDotNode.played {
-                    audioRecordingDotNode = currentAudioRecordingDotNode
-                } else {
-                    self.audioRecordingDotNode?.removeFromSupernode()
-                    audioRecordingDotNode = AnimationNode(animation: "voicebin")
-                    self.audioRecordingDotNode = audioRecordingDotNode
-                    self.addSubnode(audioRecordingDotNode)
-                }
-                
-                animateDotAppearing = transition.isAnimated && !isLocked
-                
-                audioRecordingDotNode.frame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: panelHeight - 44 + 1), size: CGSize(width: 40.0, height: 40))
-                if animateDotAppearing {
-                    audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
-                    audioRecordingDotNode.layer.animateAlpha(from: 0, to: 1, duration: 0.15, delay: 0, completion: { [weak audioRecordingDotNode] finished in
-                        if finished {
-                            let animation = CAKeyframeAnimation(keyPath: "opacity")
-                            animation.values = [1.0 as NSNumber, 1.0 as NSNumber, 0.0 as NSNumber]
-                            animation.keyTimes = [0.0 as NSNumber, 0.4546 as NSNumber, 0.9091 as NSNumber, 1 as NSNumber]
-                            animation.duration = 0.5
-                            animation.autoreverses = true
-                            animation.repeatCount = Float.infinity
-                            
-                            audioRecordingDotNode?.layer.add(animation, forKey: "recording")
-                        }
-                    })
-                    
-                    self.attachmentButton.layer.animateAlpha(from: 1, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
-                    self.attachmentButton.layer.animateScale(from: 1, to: 0.3, duration: 0.15, delay: 0, removeOnCompletion: false)
-                }
-            case let .video(status, _):
-                switch status {
-                case let .recording(recordingStatus):
-                    self.actionButtons.micButton.videoRecordingStatus = recordingStatus
-                case .editing:
-                    self.actionButtons.micButton.videoRecordingStatus = nil
-                    hideMicButton = true
                 }
+                        audioRecordingDotNode?.layer.add(animation, forKey: "recording")
+                    }
+                })
+                
+                self.attachmentButton.layer.animateAlpha(from: 1, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                self.attachmentButton.layer.animateScale(from: 1, to: 0.3, duration: 0.15, delay: 0, removeOnCompletion: false)
+            }
+            
+            if hideInfo {
+                audioRecordingDotNode.layer.animateAlpha(from: audioRecordingDotNode.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                audioRecordingTimeNode.layer.animateAlpha(from: audioRecordingTimeNode.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                audioRecordingCancelIndicator.layer.animateAlpha(from: audioRecordingCancelIndicator.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+            }
         } else {
             self.actionButtons.micButton.audioRecorder = nil
diff --git a/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift b/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift
index 7bcc57a6fc..1d7aa884ef 100644
--- a/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift
+++ b/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift
@@ -18,9 +18,11 @@ import LegacyMediaPickerUI
 
 final class InstantVideoControllerRecordingStatus {
     let micLevel: Signal<Float, NoError>
+    let duration: Signal<TimeInterval, NoError>
     
-    init(micLevel: Signal<Float, NoError>) {
+    init(micLevel: Signal<Float, NoError>, duration: Signal<TimeInterval, NoError>) {
         self.micLevel = micLevel
+        self.duration = duration
     }
 }
 
@@ -31,12 +33,13 @@ final class InstantVideoController: LegacyController, StandalonePresentableContr
     var onStop: (() -> Void)?
     
     private let micLevelValue = ValuePromise<Float>(0.0)
+    private let durationValue = ValuePromise<TimeInterval>(0.0)
     let audioStatus: InstantVideoControllerRecordingStatus
     
     private var dismissedVideo = false
     
     override init(presentation: LegacyControllerPresentation, theme: PresentationTheme?, strings: PresentationStrings? = nil, initialLayout: ContainerViewLayout? = nil) {
-        self.audioStatus = InstantVideoControllerRecordingStatus(micLevel: self.micLevelValue.get())
+        self.audioStatus = InstantVideoControllerRecordingStatus(micLevel: self.micLevelValue.get(), duration: self.durationValue.get())
         
         super.init(presentation: presentation, theme: theme, initialLayout: initialLayout)
         
@@ -53,6 +56,9 @@ final class InstantVideoController: LegacyController, StandalonePresentableContr
         captureController.micLevel = { [weak self] (level: CGFloat) -> Void in
             self?.micLevelValue.set(Float(level))
         }
+        captureController.onDuration = { [weak self] duration in
+            self?.durationValue.set(duration)
+        }
         captureController.onDismiss = { [weak self] _ in
             self?.onDismiss?()
         }
diff --git a/submodules/TelegramUI/Sources/LockView.swift b/submodules/TelegramUI/Sources/LockView.swift
index 44ae5da37f..8e281bbecd 100644
--- a/submodules/TelegramUI/Sources/LockView.swift
+++ b/submodules/TelegramUI/Sources/LockView.swift
@@ -43,6 +43,29 @@ final class LockView: UIButton, TGModernConversationInputMicButtonLock {
         addSubview(lockingView)
         lockingView.frame = bounds
         
+        updateTheme(theme)
+        updateLockness(0)
+    }
+    
+    required init?(coder: NSCoder) {
+        fatalError("init(coder:) has not been implemented")
+    }
+    
+    func updateLockness(_ lockness: CGFloat) {
+        idleView.isHidden = lockness > 0
+        if lockness > 0 && idleView.isAnimationPlaying {
+            idleView.stop()
+        } else if lockness == 0 && !idleView.isAnimationPlaying {
+            idleView.play()
+        }
+        lockingView.isHidden = !idleView.isHidden
+        
+        lockingView.animationProgress = lockness
+    }
+    
+    func updateTheme(_ theme: PresentationTheme) {
+        colorCallbacks.removeAll()
+        
         [
             "Rectangle.Заливка 1": theme.chat.inputPanel.panelBackgroundColor,
             "Rectangle.Rectangle.Обводка 1": theme.chat.inputPanel.panelControlAccentColor,
@@ -65,24 +88,6 @@ final class LockView: UIButton, TGModernConversationInputMicButtonLock {
             self.colorCallbacks.append(colorCallback)
             lockingView.setValueDelegate(colorCallback, for: LOTKeypath(string: "\(key).Color"))
         }
-        
-        updateLockness(0)
-    }
-    
-    required init?(coder: NSCoder) {
-        fatalError("init(coder:) has not been implemented")
-    }
-    
-    func updateLockness(_ lockness: CGFloat) {
-        idleView.isHidden = lockness > 0
-        if lockness > 0 && idleView.isAnimationPlaying {
-            idleView.stop()
-        } else if lockness == 0 && !idleView.isAnimationPlaying {
-            idleView.play()
-        }
-        lockingView.isHidden = !idleView.isHidden
-        
-        lockingView.animationProgress = lockness
     }
     
     override func hitTest(_ point: CGPoint, with event: UIEvent?) -> UIView? {
diff --git a/submodules/TelegramUI/Sources/WaveButtonNode.swift b/submodules/TelegramUI/Sources/WaveButtonNode.swift
index cba1dbeb45..fb0f816246 100644
--- a/submodules/TelegramUI/Sources/WaveButtonNode.swift
+++ b/submodules/TelegramUI/Sources/WaveButtonNode.swift
@@ -26,7 +26,7 @@ private struct Constants {
     static let idleRotationDiff: CGFloat = 0.1 * idleRotationSpeed
 }
 
-class CombinedWaveView: UIView, TGModernConversationInputMicButtonDecoration {
+class CombinedWaveView: UIView {
     private let bigWaveView: WaveView
     private let smallWaveView: WaveView
diff --git a/submodules/TelegramVoip/Sources/OngoingCallContext.swift b/submodules/TelegramVoip/Sources/OngoingCallContext.swift
index a1080aa44d..103b0e7c37 100644
--- a/submodules/TelegramVoip/Sources/OngoingCallContext.swift
+++ b/submodules/TelegramVoip/Sources/OngoingCallContext.swift
@@ -375,6 +375,7 @@ public final class OngoingCallContext {
     private let queue = Queue()
     private let account: Account
     private let callSessionManager: CallSessionManager
+    private let logPath: String
     
     private var contextRef: Unmanaged<OngoingCallThreadLocalContextHolder>?
@@ -415,12 +416,13 @@ public final class OngoingCallContext {
         self.internalId = internalId
         self.account = account
         self.callSessionManager = callSessionManager
+        self.logPath = logName.isEmpty ? "" : callLogsPath(account: self.account) + "/" + logName + ".log"
+        let logPath = self.logPath
         let queue = self.queue
         
         cleanupCallLogs(account: account)
         
-        let logPath = logName.isEmpty ? "" : callLogsPath(account: self.account) + "/" + logName + ".log"
-        
         self.audioSessionDisposable.set((audioSessionActive
         |> filter { $0 }
         |> take(1)
@@ -512,11 +514,13 @@ public final class OngoingCallContext {
             }
         }))
         
-        self.signalingDataDisposable = (callSessionManager.callSignalingData(internalId: internalId)
-        |> deliverOn(self.queue)).start(next: { [weak self] data in
-            self?.withContext { context in
-                if let context = context as? OngoingCallThreadLocalContextWebrtc {
-                    context.addSignaling(data)
+        self.signalingDataDisposable = (callSessionManager.callSignalingData(internalId: internalId)).start(next: { [weak self] data in
+            print("data received")
+            queue.async {
+                self?.withContext { context in
+                    if let context = context as? OngoingCallThreadLocalContextWebrtc {
+                        context.addSignaling(data)
+                    }
                 }
             }
         })
@@ -542,6 +546,9 @@ public final class OngoingCallContext {
     }
     
     public func stop(callId: CallId? = nil, sendDebugLogs: Bool = false, debugLogValue: Promise<String?>) {
+        let account = self.account
+        let logPath = self.logPath
+        
         self.withContext { context in
             context.nativeStop { debugLog, bytesSentWifi, bytesReceivedWifi, bytesSentMobile, bytesReceivedMobile in
                 debugLogValue.set(.single(debugLog))
@@ -554,8 +561,18 @@ public final class OngoingCallContext {
                     outgoing: bytesSentWifi))
                 updateAccountNetworkUsageStats(account: self.account, category: .call, delta: delta)
                 
-                if let callId = callId, let debugLog = debugLog, sendDebugLogs {
-                    let _ = saveCallDebugLog(network: self.account.network, callId: callId, log: debugLog).start()
+                if !logPath.isEmpty, let debugLog = debugLog {
+                    let logsPath = callLogsPath(account: account)
+                    let _ = try? FileManager.default.createDirectory(atPath: logsPath, withIntermediateDirectories: true, attributes: nil)
+                    if let data = debugLog.data(using: .utf8) {
+                        let _ = try? data.write(to: URL(fileURLWithPath: logPath))
+                    }
+                }
+                
+                if let callId = callId, let debugLog = debugLog {
+                    if sendDebugLogs {
+                        let _ = saveCallDebugLog(network: self.account.network, callId: callId, log: debugLog).start()
+                    }
                 }
             }
             let derivedState = context.nativeGetDerivedState()
@@ -585,12 +602,23 @@ public final class OngoingCallContext {
         return (poll |> then(.complete() |> delay(0.5, queue: Queue.concurrentDefaultQueue()))) |> restart
     }
    
-    public func getVideoView(completion: @escaping (UIView?) -> Void) {
+    public func makeIncomingVideoView(completion: @escaping (UIView?) -> Void) {
         self.withContext { context in
             if let context = context as? OngoingCallThreadLocalContextWebrtc {
-                context.getRemoteCameraView(completion)
+                context.makeIncomingVideoView(completion)
+            } else {
+                completion(nil)
+            }
+        }
+    }
+    
+    public func makeOutgoingVideoView(completion: @escaping (UIView?) -> Void) {
+        self.withContext { context in
+            if let context = context as? OngoingCallThreadLocalContextWebrtc {
+                context.makeOutgoingVideoView(completion)
+            } else {
+                completion(nil)
             }
-            completion(nil)
         }
     }
 }
diff --git a/submodules/TgVoipWebrtc/BUCK b/submodules/TgVoipWebrtc/BUCK
index 3c4738250f..4541c7fe70 100644
--- a/submodules/TgVoipWebrtc/BUCK
+++ b/submodules/TgVoipWebrtc/BUCK
@@ -1,13 +1,15 @@
 load("//Config:buck_rule_macros.bzl", "static_library", "glob_map", "glob_sub_map", "merge_maps")
 
+webrtc_include_prefix = "-I../../" if native.read_config("custom", "mode") == "project" else "-I"
+
 static_library(
     name = "TgVoipWebrtc",
     srcs = glob([
         "Sources/**/*.m",
         "Sources/**/*.mm",
-        "Impl/*.cpp",
-        "Impl/*.mm",
-        "Impl/*.m",
+        "Impl/**/*.cpp",
+        "Impl/**/*.mm",
+        "Impl/**/*.m",
     ]),
     has_cpp = True,
     headers = merge_maps([
@@ -15,22 +17,26 @@ static_library(
             "PublicHeaders/**/*.h",
         ]),
         glob_sub_map("Impl/", [
-            "Impl/*.h",
+            "Impl/**/*.h",
         ]),
     ]),
     exported_headers = glob([
         "PublicHeaders/**/*.h",
     ]),
     compiler_flags = [
-        "-Ithird-party/webrtc/webrtc-ios/src",
-        "-Ithird-party/webrtc/webrtc-ios/src/third_party/abseil-cpp",
-        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc",
-        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/base",
-        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
-        "-Ithird-party/submodules/TgVoipWebrtc/PublicHeaders",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/third_party/abseil-cpp",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/base",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/sdk/objc/components/video_codec",
+        webrtc_include_prefix + "third-party/webrtc/webrtc-ios/src/third_party/libyuv/include",
+        webrtc_include_prefix + "third-party/submodules/TgVoipWebrtc/PublicHeaders",
         "-DWEBRTC_IOS",
         "-DWEBRTC_MAC",
         "-DWEBRTC_POSIX",
+        "-DRTC_ENABLE_VP9",
+        "-DTGVOIP_NAMESPACE=tgvoip_webrtc",
     ],
     deps = [
         "//submodules/MtProtoKit:MtProtoKit#shared",
diff --git a/submodules/TgVoipWebrtc/BUILD b/submodules/TgVoipWebrtc/BUILD
index 1b7572c72f..b4890a5b45 100644
--- a/submodules/TgVoipWebrtc/BUILD
+++ b/submodules/TgVoipWebrtc/BUILD
@@ -12,10 +12,10 @@ objc_library(
         "Sources/**/*.m",
         "Sources/**/*.mm",
         "Sources/**/*.h",
-        "Impl/*.h",
-        "Impl/*.cpp",
-        "Impl/*.mm",
-        "Impl/*.m",
+        "Impl/**/*.h",
+        "Impl/**/*.cpp",
+        "Impl/**/*.mm",
+        "Impl/**/*.m",
     ]),
     hdrs = glob([
         "PublicHeaders/**/*.h",
@@ -27,9 +27,13 @@ objc_library(
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/base",
         "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/renderer/metal",
+        "-Ithird-party/webrtc/webrtc-ios/src/sdk/objc/components/video_codec",
+        "-Ithird-party/webrtc/webrtc-ios/src/third_party/libyuv/include",
         "-DWEBRTC_IOS",
         "-DWEBRTC_MAC",
        "-DWEBRTC_POSIX",
+        "-DRTC_ENABLE_VP9",
+        "-DTGVOIP_NAMESPACE=tgvoip_webrtc",
         "-std=c++14",
     ],
     includes = [
diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoDecoderFactory.h b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoDecoderFactory.h
new file mode 100644
index 0000000000..7de5cfa86d
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoDecoderFactory.h
@@ -0,0 +1,25 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoderFactory.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** This decoder factory includes support for all codecs bundled with WebRTC. If using custom
+ *  codecs, create custom implementations of RTCVideoEncoderFactory and RTCVideoDecoderFactory.
+ */
+RTC_OBJC_EXPORT
+@interface TGRTCDefaultVideoDecoderFactory : NSObject <RTCVideoDecoderFactory>
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoDecoderFactory.mm b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoDecoderFactory.mm
new file mode 100644
index 0000000000..15f643fb2c
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoDecoderFactory.mm
@@ -0,0 +1,90 @@
+/*
+ *  Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "TGRTCDefaultVideoDecoderFactory.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoDecoderH264.h"
+#import "api/video_codec/RTCVideoCodecConstants.h"
+#import "api/video_codec/RTCVideoDecoderVP8.h"
+#import "base/RTCVideoCodecInfo.h"
+#if defined(RTC_ENABLE_VP9)
+#import "api/video_codec/RTCVideoDecoderVP9.h"
+#endif
+#if !defined(DISABLE_H265)
+#import "RTCH265ProfileLevelId.h"
+#import "TGRTCVideoDecoderH265.h"
+#endif
+
+@implementation TGRTCDefaultVideoDecoderFactory
+
+- (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
+  NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+    @"level-asymmetry-allowed" : @"1",
+    @"packetization-mode" : @"1",
+  };
+  RTCVideoCodecInfo *constrainedHighInfo =
+      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
+                                   parameters:constrainedHighParams];
+
+  NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+    @"level-asymmetry-allowed" : @"1",
+    @"packetization-mode" : @"1",
+  };
+  RTCVideoCodecInfo *constrainedBaselineInfo =
+      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
+                                   parameters:constrainedBaselineParams];
+
+  RTCVideoCodecInfo *vp8Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];
+
+#if defined(RTC_ENABLE_VP9)
+  RTCVideoCodecInfo *vp9Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp9Name];
+#endif
+
+#if !defined(DISABLE_H265)
+  RTCVideoCodecInfo *h265Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH265Name];
+#endif
+
+  return @[
+    constrainedHighInfo,
+    constrainedBaselineInfo,
+    vp8Info,
+#if defined(RTC_ENABLE_VP9)
+    vp9Info,
+#endif
+#if !defined(DISABLE_H265)
+    h265Info,
+#endif
+  ];
+}
+
+- (id<RTCVideoDecoder>)createDecoder:(RTCVideoCodecInfo *)info {
+  if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
+    return [[RTCVideoDecoderH264 alloc] init];
+  } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
+    return [RTCVideoDecoderVP8 vp8Decoder];
+#if defined(RTC_ENABLE_VP9)
+  } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) {
+    return [RTCVideoDecoderVP9 vp9Decoder];
+#endif
+#if !defined(DISABLE_H265)
+  } else if (@available(iOS 11, *)) {
+    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
+      return [[TGRTCVideoDecoderH265 alloc] init];
+    }
+#endif
+  }
+
+  return nil;
+}
+
+@end
diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoEncoderFactory.h b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoEncoderFactory.h
new file mode 100644
index 0000000000..24c5b5980a
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoEncoderFactory.h
@@ -0,0 +1,30 @@
+/*
+ *  Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ *  Use of this source code is governed by a BSD-style license
+ *  that can be found in the LICENSE file in the root of the source
+ *  tree. An additional intellectual property rights grant can be found
+ *  in the file PATENTS.  All contributing project authors may
+ *  be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoEncoderFactory.h"
+
+NS_ASSUME_NONNULL_BEGIN
+
+/** This encoder factory includes support for all codecs bundled with WebRTC. If using custom
+ *  codecs, create custom implementations of RTCVideoEncoderFactory and RTCVideoDecoderFactory.
+ */
+RTC_OBJC_EXPORT
+@interface TGRTCDefaultVideoEncoderFactory : NSObject <RTCVideoEncoderFactory>
+
+@property(nonatomic, retain) RTCVideoCodecInfo *preferredCodec;
+
++ (NSArray<RTCVideoCodecInfo *> *)supportedCodecs;
+
+@end
+
+NS_ASSUME_NONNULL_END
diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoEncoderFactory.mm b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoEncoderFactory.mm
new file mode 100644
index 0000000000..7a12709539
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCDefaultVideoEncoderFactory.mm
@@ -0,0 +1,106 @@
+/*
+ * Copyright 2017 The WebRTC Project Authors. All rights reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import "TGRTCDefaultVideoEncoderFactory.h"
+
+#import "RTCH264ProfileLevelId.h"
+#import "RTCVideoEncoderH264.h"
+#import "api/video_codec/RTCVideoCodecConstants.h"
+#import "api/video_codec/RTCVideoEncoderVP8.h"
+#import "base/RTCVideoCodecInfo.h"
+#if defined(RTC_ENABLE_VP9)
+#import "api/video_codec/RTCVideoEncoderVP9.h"
+#endif
+#if !defined(DISABLE_H265)
+#import "RTCH265ProfileLevelId.h"
+#import "TGRTCVideoEncoderH265.h"
+#endif
+
+@implementation TGRTCDefaultVideoEncoderFactory
+
+@synthesize preferredCodec;
+
++ (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
+  NSDictionary<NSString *, NSString *> *constrainedHighParams = @{
+    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedHigh,
+    @"level-asymmetry-allowed" : @"1",
+    @"packetization-mode" : @"1",
+  };
+  RTCVideoCodecInfo *constrainedHighInfo =
+      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
+                                   parameters:constrainedHighParams];
+
+  NSDictionary<NSString *, NSString *> *constrainedBaselineParams = @{
+    @"profile-level-id" : kRTCMaxSupportedH264ProfileLevelConstrainedBaseline,
+    @"level-asymmetry-allowed" : @"1",
+    @"packetization-mode" : @"1",
+  };
+  RTCVideoCodecInfo *constrainedBaselineInfo =
+      [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH264Name
+                                   parameters:constrainedBaselineParams];
+
+  RTCVideoCodecInfo *vp8Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];
+
+#if defined(RTC_ENABLE_VP9)
+  RTCVideoCodecInfo *vp9Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp9Name];
+#endif
+
+#if !defined(DISABLE_H265)
+  RTCVideoCodecInfo *h265Info = [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecH265Name];
+#endif
+
+  return @[
+    constrainedHighInfo,
+    constrainedBaselineInfo,
+    vp8Info,
+#if defined(RTC_ENABLE_VP9)
+    vp9Info,
+#endif
+#if !defined(DISABLE_H265)
+    h265Info,
+#endif
+  ];
+}
+
+- (id<RTCVideoEncoder>)createEncoder:(RTCVideoCodecInfo *)info {
+  if ([info.name isEqualToString:kRTCVideoCodecH264Name]) {
+    return [[RTCVideoEncoderH264 alloc] initWithCodecInfo:info];
+  } else if ([info.name isEqualToString:kRTCVideoCodecVp8Name]) {
+    return [RTCVideoEncoderVP8 vp8Encoder];
+#if defined(RTC_ENABLE_VP9)
+  } else if ([info.name isEqualToString:kRTCVideoCodecVp9Name]) {
+    return [RTCVideoEncoderVP9 vp9Encoder];
+#endif
+#if !defined(DISABLE_H265)
+  } else if (@available(iOS 11, *)) {
+    if ([info.name isEqualToString:kRTCVideoCodecH265Name]) {
+      return [[TGRTCVideoEncoderH265 alloc] initWithCodecInfo:info];
+    }
+#endif
+  }
+
+  return nil;
+}
+
+- (NSArray<RTCVideoCodecInfo *> *)supportedCodecs {
+  NSMutableArray<RTCVideoCodecInfo *> *codecs = [[[self class] supportedCodecs] mutableCopy];
+
+  NSMutableArray<RTCVideoCodecInfo *> *orderedCodecs = [NSMutableArray array];
+
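+  // NOTE: the lines below move `preferredCodec` (when set) to the front of
+  // the returned list. -indexOfObject: matches via -[RTCVideoCodecInfo
+  // isEqual:], so the preferred entry must agree in both name and parameters.
+  // A minimal usage sketch (hypothetical call site):
+  //
+  //   TGRTCDefaultVideoEncoderFactory *factory =
+  //       [[TGRTCDefaultVideoEncoderFactory alloc] init];
+  //   factory.preferredCodec =
+  //       [[RTCVideoCodecInfo alloc] initWithName:kRTCVideoCodecVp8Name];
+  //   NSArray<RTCVideoCodecInfo *> *ordered = [factory supportedCodecs];
+  //   // `ordered` now lists VP8 first; the relative order of the rest is kept.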
+  NSUInteger index = [codecs indexOfObject:self.preferredCodec];
+  if (index != NSNotFound) {
+    [orderedCodecs addObject:[codecs objectAtIndex:index]];
+    [codecs removeObjectAtIndex:index];
+  }
+  [orderedCodecs addObjectsFromArray:codecs];
+
+  return [orderedCodecs copy];
+}
+
+@end
diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoDecoderH265.h b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoDecoderH265.h
new file mode 100644
index 0000000000..63bb07db49
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoDecoderH265.h
@@ -0,0 +1,19 @@
+/*
+ * Copyright 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#import <Foundation/Foundation.h>
+
+#import "RTCMacros.h"
+#import "RTCVideoDecoder.h"
+
+RTC_OBJC_EXPORT
+API_AVAILABLE(ios(11.0))
+@interface TGRTCVideoDecoderH265 : NSObject <RTCVideoDecoder>
+@end
diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoDecoderH265.mm b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoDecoderH265.mm
new file mode 100644
index 0000000000..5de95d65eb
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoDecoderH265.mm
@@ -0,0 +1,326 @@
+/*
+ * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ *
+ */
+
+#import "TGRTCVideoDecoderH265.h"
+
+#import <VideoToolbox/VideoToolbox.h>
+
+#import "base/RTCVideoFrame.h"
+#import "base/RTCVideoFrameBuffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+#import "helpers.h"
+#import "helpers/scoped_cftyperef.h"
+
+#if defined(WEBRTC_IOS)
+#import "helpers/UIDevice+RTCDevice.h"
+#endif
+
+#include "modules/video_coding/include/video_error_codes.h"
+#include "rtc_base/checks.h"
+#include "rtc_base/logging.h"
+#include "rtc_base/time_utils.h"
+#include "sdk/objc/components/video_codec/nalu_rewriter.h"
+
+typedef void (^TGRTCVideoDecoderRequestKeyframeCallback)();
+
+// Struct that we pass to the decoder per frame to decode. We receive it again
+// in the decoder callback.
+struct RTCH265FrameDecodeParams {
+  RTCH265FrameDecodeParams(RTCVideoDecoderCallback cb, int64_t ts, TGRTCVideoDecoderRequestKeyframeCallback requestFrame)
+      : callback(cb), timestamp(ts), requestFrame(requestFrame) {}
+  RTCVideoDecoderCallback callback;
+  int64_t timestamp;
+  TGRTCVideoDecoderRequestKeyframeCallback requestFrame;
+};
+
+// This is the callback function that VideoToolbox calls when decode is
+// complete.
+static void tg_h265DecompressionOutputCallback(void* decoder,
+                                               void* params,
+                                               OSStatus status,
+                                               VTDecodeInfoFlags infoFlags,
+                                               CVImageBufferRef imageBuffer,
+                                               CMTime timestamp,
+                                               CMTime duration) {
+  std::unique_ptr<RTCH265FrameDecodeParams> decodeParams(
+      reinterpret_cast<RTCH265FrameDecodeParams*>(params));
+  if (status != noErr) {
+    RTC_LOG(LS_ERROR) << "Failed to decode frame. Status: " << status;
+    if (status == -12909) {
+      decodeParams->requestFrame();
+    }
+    return;
+  }
+  // TODO(tkchin): Handle CVO properly.
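+  // NOTE: -12909 is kVTVideoDecoderBadDataErr. This callback runs on a
+  // VideoToolbox thread, so requestFrame() only flips a flag on the shared
+  // holder; the next -decode: call observes the flag and returns
+  // WEBRTC_VIDEO_CODEC_ERROR, which is what makes WebRTC request a fresh
+  // keyframe from the remote side after corrupt input.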
+ RTCCVPixelBuffer* frameBuffer = + [[RTCCVPixelBuffer alloc] initWithPixelBuffer:imageBuffer]; + RTCVideoFrame* decodedFrame = [[RTCVideoFrame alloc] + initWithBuffer:frameBuffer + rotation:RTCVideoRotation_0 + timeStampNs:CMTimeGetSeconds(timestamp) * rtc::kNumNanosecsPerSec]; + decodedFrame.timeStamp = decodeParams->timestamp; + decodeParams->callback(decodedFrame); +} + +@interface TGRTCVideoDecoderH265RequestKeyframeHolder : NSObject + +@property (nonatomic) NSLock *lock; +@property (nonatomic) bool shouldRequestKeyframe; + +@end + +@implementation TGRTCVideoDecoderH265RequestKeyframeHolder + +- (instancetype)init { + self = [super init]; + if (self != nil) { + _lock = [[NSLock alloc] init]; + } + return self; +} + +@end + +// Decoder. +@implementation TGRTCVideoDecoderH265 { + CMVideoFormatDescriptionRef _videoFormat; + VTDecompressionSessionRef _decompressionSession; + RTCVideoDecoderCallback _callback; + TGRTCVideoDecoderH265RequestKeyframeHolder *_requestKeyframeHolder; + TGRTCVideoDecoderRequestKeyframeCallback _requestFrame; + OSStatus _error; +} + +- (instancetype)init { + if (self = [super init]) { + _requestKeyframeHolder = [[TGRTCVideoDecoderH265RequestKeyframeHolder alloc] init]; + TGRTCVideoDecoderH265RequestKeyframeHolder *requestKeyframeHolder = _requestKeyframeHolder; + _requestFrame = ^{ + [requestKeyframeHolder.lock lock]; + requestKeyframeHolder.shouldRequestKeyframe = true; + [requestKeyframeHolder.lock unlock]; + }; + } + + return self; +} + +- (void)dealloc { + [self destroyDecompressionSession]; + [self setVideoFormat:nullptr]; +} + +- (NSInteger)startDecodeWithNumberOfCores:(int)numberOfCores { + return WEBRTC_VIDEO_CODEC_OK; +} + +- (NSInteger)decode:(RTCEncodedImage*)inputImage + missingFrames:(BOOL)missingFrames + codecSpecificInfo:(__nullable id)info + renderTimeMs:(int64_t)renderTimeMs { + RTC_DCHECK(inputImage.buffer); + + if (_error != noErr) { + RTC_LOG(LS_WARNING) << "Last frame decode failed."; + _error = noErr; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + rtc::ScopedCFTypeRef inputFormat = + rtc::ScopedCF(webrtc::CreateH265VideoFormatDescription( + (uint8_t*)inputImage.buffer.bytes, inputImage.buffer.length)); + if (inputFormat) { + CMVideoDimensions dimensions = + CMVideoFormatDescriptionGetDimensions(inputFormat.get()); + RTC_LOG(LS_INFO) << "Resolution: " << dimensions.width << " x " + << dimensions.height; + // Check if the video format has changed, and reinitialize decoder if + // needed. + if (!CMFormatDescriptionEqual(inputFormat.get(), _videoFormat)) { + [self setVideoFormat:inputFormat.get()]; + int resetDecompressionSessionError = [self resetDecompressionSession]; + if (resetDecompressionSessionError != WEBRTC_VIDEO_CODEC_OK) { + return resetDecompressionSessionError; + } + } + } + if (!_videoFormat) { + // We received a frame but we don't have format information so we can't + // decode it. + // This can happen after backgrounding. We need to wait for the next + // sps/pps before we can resume so we request a keyframe by returning an + // error. + RTC_LOG(LS_WARNING) << "Missing video format. 
Frame with sps/pps required."; + return WEBRTC_VIDEO_CODEC_ERROR; + } + CMSampleBufferRef sampleBuffer = nullptr; + if (!webrtc::H265AnnexBBufferToCMSampleBuffer( + (uint8_t*)inputImage.buffer.bytes, inputImage.buffer.length, + _videoFormat, &sampleBuffer)) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + RTC_DCHECK(sampleBuffer); + VTDecodeFrameFlags decodeFlags = + kVTDecodeFrame_EnableAsynchronousDecompression; + std::unique_ptr frameDecodeParams; + frameDecodeParams.reset( + new RTCH265FrameDecodeParams(_callback, inputImage.timeStamp, _requestFrame)); + OSStatus status = VTDecompressionSessionDecodeFrame( + _decompressionSession, sampleBuffer, decodeFlags, + frameDecodeParams.release(), nullptr); +#if defined(WEBRTC_IOS) + // Re-initialize the decoder if we have an invalid session while the app is + // active and retry the decode request. + if (status == kVTInvalidSessionErr && + [self resetDecompressionSession] == WEBRTC_VIDEO_CODEC_OK) { + frameDecodeParams.reset( + new RTCH265FrameDecodeParams(_callback, inputImage.timeStamp, _requestFrame)); + status = VTDecompressionSessionDecodeFrame( + _decompressionSession, sampleBuffer, decodeFlags, + frameDecodeParams.release(), nullptr); + } +#endif + CFRelease(sampleBuffer); + if (status != noErr) { + RTC_LOG(LS_ERROR) << "Failed to decode frame with code: " << status; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + bool requestKeyframe = false; + + [_requestKeyframeHolder.lock lock]; + if (_requestKeyframeHolder.shouldRequestKeyframe) { + _requestKeyframeHolder.shouldRequestKeyframe = false; + requestKeyframe = true; + } + [_requestKeyframeHolder.lock unlock]; + + if (requestKeyframe) { + RTC_LOG(LS_ERROR) << "Decoder asynchronously asked to request keyframe"; + return WEBRTC_VIDEO_CODEC_ERROR; + } + + return WEBRTC_VIDEO_CODEC_OK; +} + +- (void)setCallback:(RTCVideoDecoderCallback)callback { + _callback = callback; +} + +- (NSInteger)releaseDecoder { + // Need to invalidate the session so that callbacks no longer occur and it + // is safe to null out the callback. + [self destroyDecompressionSession]; + [self setVideoFormat:nullptr]; + _callback = nullptr; + return WEBRTC_VIDEO_CODEC_OK; +} + +#pragma mark - Private + +- (int)resetDecompressionSession { + [self destroyDecompressionSession]; + + // Need to wait for the first SPS to initialize decoder. + if (!_videoFormat) { + return WEBRTC_VIDEO_CODEC_OK; + } + + // Set keys for OpenGL and IOSurface compatibilty, which makes the encoder + // create pixel buffers with GPU backed memory. The intent here is to pass + // the pixel buffers directly so we avoid a texture upload later during + // rendering. This currently is moot because we are converting back to an + // I420 frame after decode, but eventually we will be able to plumb + // CVPixelBuffers directly to the renderer. + // TODO(tkchin): Maybe only set OpenGL/IOSurface keys if we know that that + // we can pass CVPixelBuffers as native handles in decoder output. 
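+  // NOTE: the CF-level construction below is the manual equivalent of this
+  // Objective-C literal (a sketch, assuming the iOS branch):
+  //
+  //   NSDictionary *attributes = @{
+  //     (NSString *)kCVPixelBufferOpenGLESCompatibilityKey : @YES,
+  //     (NSString *)kCVPixelBufferIOSurfacePropertiesKey : @{},
+  //     (NSString *)kCVPixelBufferPixelFormatTypeKey :
+  //         @(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange),
+  //   };
+  //
+  // CF APIs are used directly so the same code also builds for macOS, where
+  // the OpenGL compatibility key differs.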
+ static size_t const attributesSize = 3; + CFTypeRef keys[attributesSize] = { +#if defined(WEBRTC_IOS) + kCVPixelBufferOpenGLESCompatibilityKey, +#elif defined(WEBRTC_MAC) + kCVPixelBufferOpenGLCompatibilityKey, +#endif + kCVPixelBufferIOSurfacePropertiesKey, + kCVPixelBufferPixelFormatTypeKey + }; + CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0); + int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; + CFNumberRef pixelFormat = + CFNumberCreate(nullptr, kCFNumberLongType, &nv12type); + CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, + pixelFormat}; + CFDictionaryRef attributes = + CreateCFTypeDictionary(keys, values, attributesSize); + if (ioSurfaceValue) { + CFRelease(ioSurfaceValue); + ioSurfaceValue = nullptr; + } + if (pixelFormat) { + CFRelease(pixelFormat); + pixelFormat = nullptr; + } + VTDecompressionOutputCallbackRecord record = { + tg_h265DecompressionOutputCallback, + nullptr, + }; + OSStatus status = + VTDecompressionSessionCreate(nullptr, _videoFormat, nullptr, attributes, + &record, &_decompressionSession); + CFRelease(attributes); + if (status != noErr) { + [self destroyDecompressionSession]; + return WEBRTC_VIDEO_CODEC_ERROR; + } + [self configureDecompressionSession]; + + return WEBRTC_VIDEO_CODEC_OK; +} + +- (void)configureDecompressionSession { + RTC_DCHECK(_decompressionSession); +#if defined(WEBRTC_IOS) + // VTSessionSetProperty(_decompressionSession, + // kVTDecompressionPropertyKey_RealTime, kCFBooleanTrue); +#endif +} + +- (void)destroyDecompressionSession { + if (_decompressionSession) { +#if defined(WEBRTC_IOS) + if ([UIDevice isIOS11OrLater]) { + VTDecompressionSessionWaitForAsynchronousFrames(_decompressionSession); + } +#endif + VTDecompressionSessionInvalidate(_decompressionSession); + CFRelease(_decompressionSession); + _decompressionSession = nullptr; + } +} + +- (void)setVideoFormat:(CMVideoFormatDescriptionRef)videoFormat { + if (_videoFormat == videoFormat) { + return; + } + if (_videoFormat) { + CFRelease(_videoFormat); + } + _videoFormat = videoFormat; + if (_videoFormat) { + CFRetain(_videoFormat); + } +} + +- (NSString*)implementationName { + return @"VideoToolbox"; +} + +@end diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoEncoderH265.h b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoEncoderH265.h new file mode 100644 index 0000000000..b68192298a --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoEncoderH265.h @@ -0,0 +1,23 @@ +/* + * Copyright 2017 The WebRTC project authors. All Rights Reserved. + * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + */ + +#import + +#import "RTCMacros.h" +#import "RTCVideoCodecInfo.h" +#import "RTCVideoEncoder.h" + +RTC_OBJC_EXPORT +API_AVAILABLE(ios(11.0)) +@interface TGRTCVideoEncoderH265 : NSObject + +- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo *)codecInfo; + +@end diff --git a/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoEncoderH265.mm b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoEncoderH265.mm new file mode 100644 index 0000000000..d9ef797336 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/Apple/TGRTCVideoEncoderH265.mm @@ -0,0 +1,613 @@ +/* + * Copyright (c) 2018 The WebRTC project authors. All Rights Reserved. 
+ * + * Use of this source code is governed by a BSD-style license + * that can be found in the LICENSE file in the root of the source + * tree. An additional intellectual property rights grant can be found + * in the file PATENTS. All contributing project authors may + * be found in the AUTHORS file in the root of the source tree. + * + */ + +#import "TGRTCVideoEncoderH265.h" + +#import +#include + +#import "RTCCodecSpecificInfoH265.h" +#import "api/peerconnection/RTCRtpFragmentationHeader+Private.h" +#import "api/peerconnection/RTCVideoCodecInfo+Private.h" +#import "base/RTCI420Buffer.h" +#import "base/RTCVideoFrame.h" +#import "base/RTCVideoFrameBuffer.h" +#import "components/video_frame_buffer/RTCCVPixelBuffer.h" +#import "helpers.h" +#if defined(WEBRTC_IOS) +#import "helpers/UIDevice+RTCDevice.h" +#endif + +#include "common_video/h264/profile_level_id.h" +#include "common_video/h265/h265_bitstream_parser.h" +#include "common_video/include/bitrate_adjuster.h" +#include "libyuv/convert_from.h" +#include "modules/include/module_common_types.h" +#include "modules/video_coding/include/video_error_codes.h" +#include "rtc_base/buffer.h" +#include "rtc_base/logging.h" +#include "rtc_base/time_utils.h" +#include "sdk/objc/Framework/Classes/VideoToolbox/nalu_rewriter.h" +#include "system_wrappers/include/clock.h" + +@interface TGRTCVideoEncoderH265 () + +- (void)frameWasEncoded:(OSStatus)status + flags:(VTEncodeInfoFlags)infoFlags + sampleBuffer:(CMSampleBufferRef)sampleBuffer + width:(int32_t)width + height:(int32_t)height + renderTimeMs:(int64_t)renderTimeMs + timestamp:(uint32_t)timestamp + rotation:(RTCVideoRotation)rotation; + +@end + +namespace { // anonymous namespace + +// The ratio between kVTCompressionPropertyKey_DataRateLimits and +// kVTCompressionPropertyKey_AverageBitRate. The data rate limit is set higher +// than the average bit rate to avoid undershooting the target. +const float kLimitToAverageBitRateFactor = 1.5f; +// These thresholds deviate from the default h265 QP thresholds, as they +// have been found to work better on devices that support VideoToolbox +const int kLowh265QpThreshold = 28; +const int kHighh265QpThreshold = 39; + +// Struct that we pass to the encoder per frame to encode. We receive it again +// in the encoder callback. +struct API_AVAILABLE(ios(11.0)) RTCFrameEncodeParams { + RTCFrameEncodeParams(TGRTCVideoEncoderH265* e, + int32_t w, + int32_t h, + int64_t rtms, + uint32_t ts, + RTCVideoRotation r) + : encoder(e), + width(w), + height(h), + render_time_ms(rtms), + timestamp(ts), + rotation(r) {} + + TGRTCVideoEncoderH265* encoder; + int32_t width; + int32_t height; + int64_t render_time_ms; + uint32_t timestamp; + RTCVideoRotation rotation; +}; + +// We receive I420Frames as input, but we need to feed CVPixelBuffers into the +// encoder. This performs the copy and format conversion. +// TODO(tkchin): See if encoder will accept i420 frames and compare performance. 
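+// NOTE: I420 keeps three separate planes (Y, U, V); NV12 keeps a Y plane plus
+// one interleaved UV plane, which is the layout VideoToolbox expects for
+// kCVPixelFormatType_420YpCbCr8BiPlanarFullRange. For a width*height frame the
+// destination needs width*height luma bytes plus width*(height/2) chroma bytes
+// (the same 3/2*width*height total as I420); libyuv::I420ToNV12 below performs
+// the plane copies and the U/V interleave in one call.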
+bool CopyVideoFrameToPixelBuffer(id frameBuffer, + CVPixelBufferRef pixelBuffer) { + RTC_DCHECK(pixelBuffer); + RTC_DCHECK_EQ(CVPixelBufferGetPixelFormatType(pixelBuffer), + kCVPixelFormatType_420YpCbCr8BiPlanarFullRange); + RTC_DCHECK_EQ(CVPixelBufferGetHeightOfPlane(pixelBuffer, 0), + frameBuffer.height); + RTC_DCHECK_EQ(CVPixelBufferGetWidthOfPlane(pixelBuffer, 0), + frameBuffer.width); + + CVReturn cvRet = CVPixelBufferLockBaseAddress(pixelBuffer, 0); + if (cvRet != kCVReturnSuccess) { + RTC_LOG(LS_ERROR) << "Failed to lock base address: " << cvRet; + return false; + } + + uint8_t* dstY = reinterpret_cast( + CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0)); + int dstStrideY = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0); + uint8_t* dstUV = reinterpret_cast( + CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 1)); + int dstStrideUV = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 1); + // Convert I420 to NV12. + int ret = libyuv::I420ToNV12( + frameBuffer.dataY, frameBuffer.strideY, frameBuffer.dataU, + frameBuffer.strideU, frameBuffer.dataV, frameBuffer.strideV, dstY, + dstStrideY, dstUV, dstStrideUV, frameBuffer.width, frameBuffer.height); + CVPixelBufferUnlockBaseAddress(pixelBuffer, 0); + if (ret) { + RTC_LOG(LS_ERROR) << "Error converting I420 VideoFrame to NV12 :" << ret; + return false; + } + return true; +} + +CVPixelBufferRef CreatePixelBuffer(CVPixelBufferPoolRef pixel_buffer_pool) { + if (!pixel_buffer_pool) { + RTC_LOG(LS_ERROR) << "Failed to get pixel buffer pool."; + return nullptr; + } + CVPixelBufferRef pixel_buffer; + CVReturn ret = CVPixelBufferPoolCreatePixelBuffer(nullptr, pixel_buffer_pool, + &pixel_buffer); + if (ret != kCVReturnSuccess) { + RTC_LOG(LS_ERROR) << "Failed to create pixel buffer: " << ret; + // We probably want to drop frames here, since failure probably means + // that the pool is empty. + return nullptr; + } + return pixel_buffer; +} + +// This is the callback function that VideoToolbox calls when encode is +// complete. From inspection this happens on its own queue. +void compressionOutputCallback(void* encoder, + void* params, + OSStatus status, + VTEncodeInfoFlags infoFlags, + CMSampleBufferRef sampleBuffer) + API_AVAILABLE(ios(11.0)) { + RTC_CHECK(params); + std::unique_ptr encodeParams( + reinterpret_cast(params)); + RTC_CHECK(encodeParams->encoder); + [encodeParams->encoder frameWasEncoded:status + flags:infoFlags + sampleBuffer:sampleBuffer + width:encodeParams->width + height:encodeParams->height + renderTimeMs:encodeParams->render_time_ms + timestamp:encodeParams->timestamp + rotation:encodeParams->rotation]; +} +} // namespace + +@implementation TGRTCVideoEncoderH265 { + RTCVideoCodecInfo* _codecInfo; + std::unique_ptr _bitrateAdjuster; + uint32_t _targetBitrateBps; + uint32_t _encoderBitrateBps; + CFStringRef _profile; + RTCVideoEncoderCallback _callback; + int32_t _width; + int32_t _height; + VTCompressionSessionRef _compressionSession; + RTCVideoCodecMode _mode; + int framesLeft; + + webrtc::H265BitstreamParser _h265BitstreamParser; + std::vector _nv12ScaleBuffer; +} + +// .5 is set as a mininum to prevent overcompensating for large temporary +// overshoots. We don't want to degrade video quality too badly. +// .95 is set to prevent oscillations. When a lower bitrate is set on the +// encoder than previously set, its output seems to have a brief period of +// drastically reduced bitrate, so we want to avoid that. In steady state +// conditions, 0.95 seems to give us better overall bitrate over long periods +// of time. 
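+// NOTE: concretely, webrtc::BitrateAdjuster(0.5f, 0.95f) keeps the bitrate
+// actually handed to VideoToolbox roughly within [0.5, 0.95] of the target
+// set by WebRTC, based on the measured output rate. A minimal sketch,
+// assuming the adjuster's public interface:
+//
+//   _bitrateAdjuster->SetTargetBitrateBps(500 * 1000);  // 500 kbps target
+//   uint32_t bps = _bitrateAdjuster->GetAdjustedBitrateBps();
+//   // bps ends up between ~250 and ~475 kbps depending on recent overshoot.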
+- (instancetype)initWithCodecInfo:(RTCVideoCodecInfo*)codecInfo { + if (self = [super init]) { + _codecInfo = codecInfo; + _bitrateAdjuster.reset(new webrtc::BitrateAdjuster(.5, .95)); + RTC_CHECK([codecInfo.name isEqualToString:@"H265"]); + } + return self; +} + +- (void)dealloc { + [self destroyCompressionSession]; +} + +- (NSInteger)startEncodeWithSettings:(RTCVideoEncoderSettings*)settings + numberOfCores:(int)numberOfCores { + RTC_DCHECK(settings); + RTC_DCHECK([settings.name isEqualToString:@"H265"]); + + _width = settings.width; + _height = settings.height; + _mode = settings.mode; + + // We can only set average bitrate on the HW encoder. + _targetBitrateBps = settings.startBitrate; + _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps); + + // TODO(tkchin): Try setting payload size via + // kVTCompressionPropertyKey_Maxh265SliceBytes. + + return [self resetCompressionSession]; +} + +- (NSInteger)encode:(RTCVideoFrame*)frame + codecSpecificInfo:(id)codecSpecificInfo + frameTypes:(NSArray*)frameTypes { + RTC_DCHECK_EQ(frame.width, _width); + RTC_DCHECK_EQ(frame.height, _height); + if (!_callback || !_compressionSession) { + return WEBRTC_VIDEO_CODEC_UNINITIALIZED; + } + BOOL isKeyframeRequired = NO; + + // Get a pixel buffer from the pool and copy frame data over. + CVPixelBufferPoolRef pixelBufferPool = + VTCompressionSessionGetPixelBufferPool(_compressionSession); + +#if defined(WEBRTC_IOS) + if (!pixelBufferPool) { + // Kind of a hack. On backgrounding, the compression session seems to get + // invalidated, which causes this pool call to fail when the application + // is foregrounded and frames are being sent for encoding again. + // Resetting the session when this happens fixes the issue. + // In addition we request a keyframe so video can recover quickly. + [self resetCompressionSession]; + pixelBufferPool = + VTCompressionSessionGetPixelBufferPool(_compressionSession); + isKeyframeRequired = YES; + RTC_LOG(LS_INFO) << "Resetting compression session due to invalid pool."; + } +#endif + + CVPixelBufferRef pixelBuffer = nullptr; + if ([frame.buffer isKindOfClass:[RTCCVPixelBuffer class]]) { + // Native frame buffer + RTCCVPixelBuffer* rtcPixelBuffer = (RTCCVPixelBuffer*)frame.buffer; + if (![rtcPixelBuffer requiresCropping]) { + // This pixel buffer might have a higher resolution than what the + // compression session is configured to. The compression session can + // handle that and will output encoded frames in the configured + // resolution regardless of the input pixel buffer resolution. + pixelBuffer = rtcPixelBuffer.pixelBuffer; + CVBufferRetain(pixelBuffer); + } else { + // Cropping required, we need to crop and scale to a new pixel buffer. 
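+      // NOTE: an RTCCVPixelBuffer can reference a larger camera buffer plus
+      // cropX/cropY/cropWidth/cropHeight metadata. The code below renders
+      // only that region into a pool-owned buffer matching the session size,
+      // reusing _nv12ScaleBuffer as scratch when an intermediate rescale is
+      // needed, so the common no-crop path above stays zero-copy.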
+ pixelBuffer = CreatePixelBuffer(pixelBufferPool); + if (!pixelBuffer) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + int dstWidth = CVPixelBufferGetWidth(pixelBuffer); + int dstHeight = CVPixelBufferGetHeight(pixelBuffer); + if ([rtcPixelBuffer requiresScalingToWidth:dstWidth height:dstHeight]) { + int size = + [rtcPixelBuffer bufferSizeForCroppingAndScalingToWidth:dstWidth + height:dstHeight]; + _nv12ScaleBuffer.resize(size); + } else { + _nv12ScaleBuffer.clear(); + } + _nv12ScaleBuffer.shrink_to_fit(); + if (![rtcPixelBuffer cropAndScaleTo:pixelBuffer + withTempBuffer:_nv12ScaleBuffer.data()]) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + } + + if (!pixelBuffer) { + // We did not have a native frame buffer + pixelBuffer = CreatePixelBuffer(pixelBufferPool); + if (!pixelBuffer) { + return WEBRTC_VIDEO_CODEC_ERROR; + } + RTC_DCHECK(pixelBuffer); + if (!CopyVideoFrameToPixelBuffer([frame.buffer toI420], pixelBuffer)) { + RTC_LOG(LS_ERROR) << "Failed to copy frame data."; + CVBufferRelease(pixelBuffer); + return WEBRTC_VIDEO_CODEC_ERROR; + } + } + + // Check if we need a keyframe. + if (!isKeyframeRequired && frameTypes) { + for (NSNumber* frameType in frameTypes) { + if ((RTCFrameType)frameType.intValue == RTCFrameTypeVideoFrameKey) { + isKeyframeRequired = YES; + break; + } + } + } + + CMTime presentationTimeStamp = + CMTimeMake(frame.timeStampNs / rtc::kNumNanosecsPerMillisec, 1000); + CFDictionaryRef frameProperties = nullptr; + if (isKeyframeRequired) { + CFTypeRef keys[] = {kVTEncodeFrameOptionKey_ForceKeyFrame}; + CFTypeRef values[] = {kCFBooleanTrue}; + frameProperties = CreateCFTypeDictionary(keys, values, 1); + } + + std::unique_ptr encodeParams; + encodeParams.reset(new RTCFrameEncodeParams( + self, _width, _height, frame.timeStampNs / rtc::kNumNanosecsPerMillisec, + frame.timeStamp, frame.rotation)); + + // Update the bitrate if needed. + [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()]; + + OSStatus status = VTCompressionSessionEncodeFrame( + _compressionSession, pixelBuffer, presentationTimeStamp, kCMTimeInvalid, + frameProperties, encodeParams.release(), nullptr); + if (frameProperties) { + CFRelease(frameProperties); + } + if (pixelBuffer) { + CVBufferRelease(pixelBuffer); + } + if (status != noErr) { + RTC_LOG(LS_ERROR) << "Failed to encode frame with code: " << status; + return WEBRTC_VIDEO_CODEC_ERROR; + } + return WEBRTC_VIDEO_CODEC_OK; +} + +- (void)setCallback:(RTCVideoEncoderCallback)callback { + _callback = callback; +} + +- (int)setBitrate:(uint32_t)bitrateKbit framerate:(uint32_t)framerate { + _targetBitrateBps = 1000 * bitrateKbit; + _bitrateAdjuster->SetTargetBitrateBps(_targetBitrateBps); + [self setBitrateBps:_bitrateAdjuster->GetAdjustedBitrateBps()]; + return WEBRTC_VIDEO_CODEC_OK; +} + +#pragma mark - Private + +- (NSInteger)releaseEncoder { + // Need to destroy so that the session is invalidated and won't use the + // callback anymore. Do not remove callback until the session is invalidated + // since async encoder callbacks can occur until invalidation. + [self destroyCompressionSession]; + _callback = nullptr; + return WEBRTC_VIDEO_CODEC_OK; +} + +- (int)resetCompressionSession { + [self destroyCompressionSession]; + + // Set source image buffer attributes. These attributes will be present on + // buffers retrieved from the encoder's pixel buffer pool. 
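+  // NOTE: asking for NV12 (kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)
+  // with IOSurface backing lets camera frames, which already arrive as NV12
+  // CVPixelBuffers, enter the compression session without conversion; only
+  // non-native RTCI420Buffer inputs take the CopyVideoFrameToPixelBuffer()
+  // path. A quick sanity check on a pool buffer would be (sketch):
+  //
+  //   OSType fmt = CVPixelBufferGetPixelFormatType(pixelBuffer);
+  //   RTC_DCHECK_EQ(fmt, kCVPixelFormatType_420YpCbCr8BiPlanarFullRange);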
+ const size_t attributesSize = 3; + CFTypeRef keys[attributesSize] = { +#if defined(WEBRTC_IOS) + kCVPixelBufferOpenGLESCompatibilityKey, +#elif defined(WEBRTC_MAC) + kCVPixelBufferOpenGLCompatibilityKey, +#endif + kCVPixelBufferIOSurfacePropertiesKey, + kCVPixelBufferPixelFormatTypeKey + }; + CFDictionaryRef ioSurfaceValue = CreateCFTypeDictionary(nullptr, nullptr, 0); + int64_t nv12type = kCVPixelFormatType_420YpCbCr8BiPlanarFullRange; + CFNumberRef pixelFormat = + CFNumberCreate(nullptr, kCFNumberLongType, &nv12type); + CFTypeRef values[attributesSize] = {kCFBooleanTrue, ioSurfaceValue, + pixelFormat}; + CFDictionaryRef sourceAttributes = + CreateCFTypeDictionary(keys, values, attributesSize); + if (ioSurfaceValue) { + CFRelease(ioSurfaceValue); + ioSurfaceValue = nullptr; + } + if (pixelFormat) { + CFRelease(pixelFormat); + pixelFormat = nullptr; + } + CFMutableDictionaryRef encoder_specs = nullptr; +#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) + // Currently hw accl is supported above 360p on mac, below 360p + // the compression session will be created with hw accl disabled. + encoder_specs = + CFDictionaryCreateMutable(nullptr, 1, &kCFTypeDictionaryKeyCallBacks, + &kCFTypeDictionaryValueCallBacks); + CFDictionarySetValue( + encoder_specs, + kVTVideoEncoderSpecification_EnableHardwareAcceleratedVideoEncoder, + kCFBooleanTrue); +#endif + OSStatus status = VTCompressionSessionCreate( + nullptr, // use default allocator + _width, _height, kCMVideoCodecType_HEVC, + encoder_specs, // use hardware accelerated encoder if available + sourceAttributes, + nullptr, // use default compressed data allocator + compressionOutputCallback, nullptr, &_compressionSession); + if (sourceAttributes) { + CFRelease(sourceAttributes); + sourceAttributes = nullptr; + } + if (encoder_specs) { + CFRelease(encoder_specs); + encoder_specs = nullptr; + } + if (status != noErr) { + RTC_LOG(LS_ERROR) << "Failed to create compression session: " << status; + return WEBRTC_VIDEO_CODEC_ERROR; + } +#if defined(WEBRTC_MAC) && !defined(WEBRTC_IOS) + CFBooleanRef hwaccl_enabled = nullptr; + status = VTSessionCopyProperty( + _compressionSession, + kVTCompressionPropertyKey_UsingHardwareAcceleratedVideoEncoder, nullptr, + &hwaccl_enabled); + if (status == noErr && (CFBooleanGetValue(hwaccl_enabled))) { + RTC_LOG(LS_INFO) << "Compression session created with hw accl enabled"; + } else { + RTC_LOG(LS_INFO) << "Compression session created with hw accl disabled"; + } +#endif + [self configureCompressionSession]; + return WEBRTC_VIDEO_CODEC_OK; +} + +- (void)configureCompressionSession { + RTC_DCHECK(_compressionSession); + SetVTSessionProperty(_compressionSession, kVTCompressionPropertyKey_RealTime, + true); + // SetVTSessionProperty(_compressionSession, + // kVTCompressionPropertyKey_ProfileLevel, _profile); + SetVTSessionProperty(_compressionSession, + kVTCompressionPropertyKey_AllowFrameReordering, false); + [self setEncoderBitrateBps:_targetBitrateBps]; + // TODO(tkchin): Look at entropy mode and colorspace matrices. + // TODO(tkchin): Investigate to see if there's any way to make this work. + // May need it to interop with Android. Currently this call just fails. + // On inspecting encoder output on iOS8, this value is set to 6. + // internal::SetVTSessionProperty(compression_session_, + // kVTCompressionPropertyKey_MaxFrameDelayCount, + // 1); + + // Set a relatively large value for keyframe emission (7200 frames or 4 + // minutes). 
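+  // NOTE: 7200 frames at 30 fps equals the 240 s set via
+  // MaxKeyFrameIntervalDuration below, so both properties express the same
+  // 4-minute ceiling; VideoToolbox emits a keyframe when either limit is
+  // reached first. On-demand keyframes are still forced per frame in
+  // -encode: through kVTEncodeFrameOptionKey_ForceKeyFrame, so these values
+  // only bound the worst case.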
+ SetVTSessionProperty(_compressionSession, + kVTCompressionPropertyKey_MaxKeyFrameInterval, 7200); + SetVTSessionProperty(_compressionSession, + kVTCompressionPropertyKey_MaxKeyFrameIntervalDuration, + 240); + OSStatus status = + VTCompressionSessionPrepareToEncodeFrames(_compressionSession); + if (status != noErr) { + RTC_LOG(LS_ERROR) << "Compression session failed to prepare encode frames."; + } +} + +- (void)destroyCompressionSession { + if (_compressionSession) { + VTCompressionSessionInvalidate(_compressionSession); + CFRelease(_compressionSession); + _compressionSession = nullptr; + } +} + +- (NSString*)implementationName { + return @"VideoToolbox"; +} + +- (void)setBitrateBps:(uint32_t)bitrateBps { + if (_encoderBitrateBps != bitrateBps) { + [self setEncoderBitrateBps:bitrateBps]; + } +} + +- (void)setEncoderBitrateBps:(uint32_t)bitrateBps { + if (_compressionSession) { + SetVTSessionProperty(_compressionSession, + kVTCompressionPropertyKey_AverageBitRate, bitrateBps); + + // TODO(tkchin): Add a helper method to set array value. + int64_t dataLimitBytesPerSecondValue = + static_cast(bitrateBps * kLimitToAverageBitRateFactor / 8); + CFNumberRef bytesPerSecond = + CFNumberCreate(kCFAllocatorDefault, kCFNumberSInt64Type, + &dataLimitBytesPerSecondValue); + int64_t oneSecondValue = 1; + CFNumberRef oneSecond = CFNumberCreate( + kCFAllocatorDefault, kCFNumberSInt64Type, &oneSecondValue); + const void* nums[2] = {bytesPerSecond, oneSecond}; + CFArrayRef dataRateLimits = + CFArrayCreate(nullptr, nums, 2, &kCFTypeArrayCallBacks); + OSStatus status = VTSessionSetProperty( + _compressionSession, kVTCompressionPropertyKey_DataRateLimits, + dataRateLimits); + if (bytesPerSecond) { + CFRelease(bytesPerSecond); + } + if (oneSecond) { + CFRelease(oneSecond); + } + if (dataRateLimits) { + CFRelease(dataRateLimits); + } + if (status != noErr) { + RTC_LOG(LS_ERROR) << "Failed to set data rate limit"; + } + + _encoderBitrateBps = bitrateBps; + } +} + +- (void)frameWasEncoded:(OSStatus)status + flags:(VTEncodeInfoFlags)infoFlags + sampleBuffer:(CMSampleBufferRef)sampleBuffer + width:(int32_t)width + height:(int32_t)height + renderTimeMs:(int64_t)renderTimeMs + timestamp:(uint32_t)timestamp + rotation:(RTCVideoRotation)rotation { + if (status != noErr) { + RTC_LOG(LS_ERROR) << "h265 encode failed."; + return; + } + if (infoFlags & kVTEncodeInfo_FrameDropped) { + RTC_LOG(LS_INFO) << "h265 encoder dropped a frame."; + return; + } + + BOOL isKeyframe = NO; + CFArrayRef attachments = + CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, 0); + if (attachments != nullptr && CFArrayGetCount(attachments)) { + CFDictionaryRef attachment = + static_cast(CFArrayGetValueAtIndex(attachments, 0)); + isKeyframe = + !CFDictionaryContainsKey(attachment, kCMSampleAttachmentKey_NotSync); + } + + if (isKeyframe) { + RTC_LOG(LS_INFO) << "Generated keyframe"; + } + + // Convert the sample buffer into a buffer suitable for RTP packetization. + // TODO(tkchin): Allocate buffers through a pool. 
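+  // NOTE: VideoToolbox hands back length-prefixed (hvcC-style) NALUs;
+  // H265CMSampleBufferToAnnexBBuffer() below rewrites them with
+  // 00 00 00 01 start codes and, on keyframes, prepends the VPS/SPS/PPS from
+  // the sample buffer's format description, so the RTP packetizer receives a
+  // self-contained Annex B stream.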
+ std::unique_ptr buffer(new rtc::Buffer()); + RTCRtpFragmentationHeader* header; + { + std::unique_ptr header_cpp; + bool result = H265CMSampleBufferToAnnexBBuffer(sampleBuffer, isKeyframe, + buffer.get(), &header_cpp); + header = [[RTCRtpFragmentationHeader alloc] + initWithNativeFragmentationHeader:header_cpp.get()]; + if (!result) { + RTC_LOG(LS_ERROR) << "Failed to convert sample buffer."; + return; + } + } + + RTCEncodedImage* frame = [[RTCEncodedImage alloc] init]; + frame.buffer = [NSData dataWithBytesNoCopy:buffer->data() + length:buffer->size() + freeWhenDone:NO]; + frame.encodedWidth = width; + frame.encodedHeight = height; + frame.completeFrame = YES; + frame.frameType = + isKeyframe ? RTCFrameTypeVideoFrameKey : RTCFrameTypeVideoFrameDelta; + frame.captureTimeMs = renderTimeMs; + frame.timeStamp = timestamp; + frame.rotation = rotation; + frame.contentType = (_mode == RTCVideoCodecModeScreensharing) + ? RTCVideoContentTypeScreenshare + : RTCVideoContentTypeUnspecified; + frame.flags = webrtc::VideoSendTiming::kInvalid; + + int qp; + _h265BitstreamParser.ParseBitstream(buffer->data(), buffer->size()); + _h265BitstreamParser.GetLastSliceQp(&qp); + frame.qp = @(qp); + + BOOL res = _callback(frame, [[RTCCodecSpecificInfoH265 alloc] init], header); + if (!res) { + RTC_LOG(LS_ERROR) << "Encode callback failed."; + return; + } + _bitrateAdjuster->Update(frame.buffer.length); +} + +- (RTCVideoEncoderQpThresholds*)scalingSettings { + return [[RTCVideoEncoderQpThresholds alloc] + initWithThresholdsLow:kLowh265QpThreshold + high:kHighh265QpThreshold]; +} + +@end diff --git a/submodules/TgVoipWebrtc/Impl/CodecsApple.h b/submodules/TgVoipWebrtc/Impl/CodecsApple.h new file mode 100644 index 0000000000..b691f1ecc4 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/CodecsApple.h @@ -0,0 +1,28 @@ +#ifndef CODECS_APPLE_H +#define CODECS_APPLE_H + +#include "rtc_base/thread.h" +#include "api/video_codecs/video_encoder_factory.h" +#include "api/video_codecs/video_decoder_factory.h" +#include "api/media_stream_interface.h" + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +class VideoCapturerInterface { +public: + virtual ~VideoCapturerInterface(); +}; + +void configurePlatformAudio(); +std::unique_ptr makeVideoEncoderFactory(); +std::unique_ptr makeVideoDecoderFactory(); +rtc::scoped_refptr makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread); +std::unique_ptr makeVideoCapturer(rtc::scoped_refptr source); + +#ifdef TGVOIP_NAMESPACE +} +#endif + +#endif diff --git a/submodules/TgVoipWebrtc/Impl/CodecsApple.mm b/submodules/TgVoipWebrtc/Impl/CodecsApple.mm new file mode 100644 index 0000000000..519c836f44 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/CodecsApple.mm @@ -0,0 +1,175 @@ +#import "CodecsApple.h" + +#include "absl/strings/match.h" +#include "api/audio_codecs/audio_decoder_factory_template.h" +#include "api/audio_codecs/audio_encoder_factory_template.h" +#include "api/audio_codecs/opus/audio_decoder_opus.h" +#include "api/audio_codecs/opus/audio_encoder_opus.h" +#include "api/rtp_parameters.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "media/base/codec.h" +#include "media/base/media_constants.h" +#include "media/engine/webrtc_media_engine.h" +#include "modules/audio_device/include/audio_device_default.h" +#include "rtc_base/task_utils/repeating_task.h" +#include "system_wrappers/include/field_trial.h" +#include "api/video/builtin_video_bitrate_allocator_factory.h" +#include "api/video/video_bitrate_allocation.h" + +#include 
"Apple/TGRTCDefaultVideoEncoderFactory.h" +#include "Apple/TGRTCDefaultVideoDecoderFactory.h" +#include "sdk/objc/native/api/video_encoder_factory.h" +#include "sdk/objc/native/api/video_decoder_factory.h" + +#include "sdk/objc/native/src/objc_video_track_source.h" +#include "api/video_track_source_proxy.h" +#include "sdk/objc/api/RTCVideoRendererAdapter.h" +#include "sdk/objc/native/api/video_frame.h" +#include "sdk/objc/components/audio/RTCAudioSession.h" +#include "api/media_types.h" + +#import "VideoCameraCapturer.h" + +@interface VideoCapturerInterfaceImplReference : NSObject { + VideoCameraCapturer *_videoCapturer; +} + +@end + +@implementation VideoCapturerInterfaceImplReference + +- (instancetype)initWithSource:(rtc::scoped_refptr)source { + self = [super init]; + if (self != nil) { + assert([NSThread isMainThread]); + + _videoCapturer = [[VideoCameraCapturer alloc] initWithSource:source]; + + AVCaptureDevice *frontCamera = nil; + for (AVCaptureDevice *device in [VideoCameraCapturer captureDevices]) { + if (device.position == AVCaptureDevicePositionFront) { + frontCamera = device; + break; + } + } + + if (frontCamera == nil) { + return nil; + } + + NSArray *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:frontCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) { + int32_t width1 = CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width; + int32_t width2 = CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width; + return width1 < width2 ? NSOrderedAscending : NSOrderedDescending; + }]; + + AVCaptureDeviceFormat *bestFormat = nil; + for (AVCaptureDeviceFormat *format in sortedFormats) { + CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription); + if (dimensions.width >= 1000 || dimensions.height >= 1000) { + bestFormat = format; + break; + } + } + + if (bestFormat == nil) { + assert(false); + return nil; + } + + AVFrameRateRange *frameRateRange = [[bestFormat.videoSupportedFrameRateRanges sortedArrayUsingComparator:^NSComparisonResult(AVFrameRateRange *lhs, AVFrameRateRange *rhs) { + if (lhs.maxFrameRate < rhs.maxFrameRate) { + return NSOrderedAscending; + } else { + return NSOrderedDescending; + } + }] lastObject]; + + if (frameRateRange == nil) { + assert(false); + return nil; + } + + [_videoCapturer startCaptureWithDevice:frontCamera format:bestFormat fps:30]; + } + return self; +} + +- (void)dealloc { + assert([NSThread isMainThread]); + + [_videoCapturer stopCapture]; +} + +@end + +@interface VideoCapturerInterfaceImplHolder : NSObject + +@property (nonatomic) void *reference; + +@end + +@implementation VideoCapturerInterfaceImplHolder + +@end + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +class VideoCapturerInterfaceImpl: public VideoCapturerInterface { +public: + VideoCapturerInterfaceImpl(rtc::scoped_refptr source) : + _source(source) { + _implReference = [[VideoCapturerInterfaceImplHolder alloc] init]; + VideoCapturerInterfaceImplHolder *implReference = _implReference; + dispatch_async(dispatch_get_main_queue(), ^{ + VideoCapturerInterfaceImplReference *value = [[VideoCapturerInterfaceImplReference alloc] initWithSource:source]; + if (value != nil) { + implReference.reference = (void *)CFBridgingRetain(value); + } + }); + } + + virtual ~VideoCapturerInterfaceImpl() { + VideoCapturerInterfaceImplHolder *implReference = _implReference; + dispatch_async(dispatch_get_main_queue(), ^{ + if (implReference.reference != nil) { + 
CFBridgingRelease(implReference.reference); + } + }); + } + +private: + rtc::scoped_refptr _source; + VideoCapturerInterfaceImplHolder *_implReference; +}; + +VideoCapturerInterface::~VideoCapturerInterface() { +} + +void configurePlatformAudio() { + //[RTCAudioSession sharedInstance].useManualAudio = true; + //[RTCAudioSession sharedInstance].isAudioEnabled = true; +} + +std::unique_ptr makeVideoEncoderFactory() { + return webrtc::ObjCToNativeVideoEncoderFactory([[TGRTCDefaultVideoEncoderFactory alloc] init]); +} + +std::unique_ptr makeVideoDecoderFactory() { + return webrtc::ObjCToNativeVideoDecoderFactory([[TGRTCDefaultVideoDecoderFactory alloc] init]); +} + +rtc::scoped_refptr makeVideoSource(rtc::Thread *signalingThread, rtc::Thread *workerThread) { + rtc::scoped_refptr objCVideoTrackSource(new rtc::RefCountedObject()); + return webrtc::VideoTrackSourceProxy::Create(signalingThread, workerThread, objCVideoTrackSource); +} + +std::unique_ptr makeVideoCapturer(rtc::scoped_refptr source) { + return std::make_unique(source); +} + +#ifdef TGVOIP_NAMESPACE +} +#endif diff --git a/submodules/TgVoipWebrtc/Impl/Connector.h b/submodules/TgVoipWebrtc/Impl/Connector.h deleted file mode 100644 index 35187efafa..0000000000 --- a/submodules/TgVoipWebrtc/Impl/Connector.h +++ /dev/null @@ -1,50 +0,0 @@ -#ifndef DEMO_CONNECTOR_H -#define DEMO_CONNECTOR_H - -#include "p2p/base/basic_packet_socket_factory.h" -#include "rtc_base/proxy_info.h" -#include "rtc_base/task_utils/repeating_task.h" -#include "rtc_base/third_party/sigslot/sigslot.h" -#include "rtc_base/thread.h" - -#include "p2p/base/p2p_transport_channel.h" -#include "p2p/client/basic_port_allocator.h" -#include "p2p/base/basic_async_resolver_factory.h" - -#include -#include - -class Connector : public sigslot::has_slots<> { -public: - explicit Connector(bool isOutgoing); - ~Connector() override; - void Start(); - - sigslot::signal1&> SignalCandidatesGathered; - sigslot::signal1 SignalReadyToSendStateChanged; - sigslot::signal1 SignalPacketReceived; - - void AddRemoteCandidates(const std::vector &candidates); - void SendPacket(const rtc::CopyOnWriteBuffer& data); - -private: - void CandidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate); - void CandidateGatheringState(cricket::IceTransportInternal *transport); - void TransportStateChanged(cricket::IceTransportInternal *transport); - void TransportRoleConflict(cricket::IceTransportInternal *transport); - void TransportReadyToSend(cricket::IceTransportInternal *transport); - void TransportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t ×tamp, int unused); - - std::unique_ptr networkThread; - - bool isOutgoing; - std::unique_ptr socketFactory; - std::unique_ptr networkManager; - std::unique_ptr portAllocator; - std::unique_ptr asyncResolverFactory; - std::unique_ptr transportChannel; - - std::vector collectedLocalCandidates; -}; - -#endif //DEMO_CONNECTOR_H diff --git a/submodules/TgVoipWebrtc/Impl/Connector.mm b/submodules/TgVoipWebrtc/Impl/Connector.mm deleted file mode 100644 index 2480940642..0000000000 --- a/submodules/TgVoipWebrtc/Impl/Connector.mm +++ /dev/null @@ -1,158 +0,0 @@ -#include "Connector.h" - -#include "MediaEngineWebrtc.h" - -#include "api/packet_socket_factory.h" -#include "rtc_base/task_utils/to_queued_task.h" -#include "p2p/base/ice_credentials_iterator.h" -#include "api/jsep_ice_candidate.h" - -#include - -Connector::Connector(bool isOutgoing) { - networkThread = 
rtc::Thread::CreateWithSocketServer(); - - this->isOutgoing = isOutgoing; -} - -Connector::~Connector() { - networkThread->Invoke(RTC_FROM_HERE, [this]() { - transportChannel = nullptr; - asyncResolverFactory = nullptr; - portAllocator = nullptr; - networkManager = nullptr; - socketFactory = nullptr; - }); -} - -void Connector::Start() { - NSLog(@"Started %d", (int)[[NSDate date] timeIntervalSince1970]); - networkThread->Start(); - - networkThread->Invoke(RTC_FROM_HERE, [this] { - socketFactory.reset(new rtc::BasicPacketSocketFactory(networkThread.get())); - - networkManager = std::make_unique(); - portAllocator.reset(new cricket::BasicPortAllocator(networkManager.get(), socketFactory.get(), /*turn_customizer=*/ nullptr, /*relay_port_factory=*/ nullptr)); - uint32_t flags = cricket::PORTALLOCATOR_DISABLE_TCP; - //flags |= cricket::PORTALLOCATOR_DISABLE_UDP; - portAllocator->set_flags(portAllocator->flags() | flags); - portAllocator->Initialize(); - - rtc::SocketAddress defaultStunAddress = rtc::SocketAddress("hlgkfjdrtjfykgulhijkljhulyo.uksouth.cloudapp.azure.com", 3478); - cricket::ServerAddresses stunServers; - stunServers.insert(defaultStunAddress); - std::vector turnServers; - turnServers.push_back(cricket::RelayServerConfig( - rtc::SocketAddress("hlgkfjdrtjfykgulhijkljhulyo.uksouth.cloudapp.azure.com", 3478), - "user", - "root", - cricket::PROTO_UDP - )); - portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE); - - asyncResolverFactory = std::make_unique(); - transportChannel.reset(new cricket::P2PTransportChannel("transport", 0, portAllocator.get(), asyncResolverFactory.get(), /*event_log=*/ nullptr)); - - cricket::IceConfig iceConfig; - iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY; - transportChannel->SetIceConfig(iceConfig); - - cricket::IceParameters localIceParameters( - "gcp3", - "zWDKozH8/3JWt8he3M/CMj5R", - false - ); - cricket::IceParameters remoteIceParameters( - "acp3", - "aWDKozH8/3JWt8he3M/CMj5R", - false - ); - - transportChannel->SetIceParameters(isOutgoing ? localIceParameters : remoteIceParameters); - transportChannel->SetIceRole(isOutgoing ? cricket::ICEROLE_CONTROLLING : cricket::ICEROLE_CONTROLLED); - - transportChannel->SignalCandidateGathered.connect(this, &Connector::CandidateGathered); - transportChannel->SignalGatheringState.connect(this, &Connector::CandidateGatheringState); - transportChannel->SignalIceTransportStateChanged.connect(this, &Connector::TransportStateChanged); - transportChannel->SignalRoleConflict.connect(this, &Connector::TransportRoleConflict); - transportChannel->SignalReadPacket.connect(this, &Connector::TransportPacketReceived); - - transportChannel->MaybeStartGathering(); - - transportChannel->SetRemoteIceMode(cricket::ICEMODE_FULL); - transportChannel->SetRemoteIceParameters((!isOutgoing) ? 
localIceParameters : remoteIceParameters); - }); -} - -void Connector::AddRemoteCandidates(const std::vector &candidates) { - networkThread->Invoke(RTC_FROM_HERE, [this, candidates] { - for (auto &serializedCandidate : candidates) { - webrtc::JsepIceCandidate parseCandidate("", 0); - if (parseCandidate.Initialize(serializedCandidate, nullptr)) { - auto candidate = parseCandidate.candidate(); - printf("Add remote candidate %s\n", serializedCandidate.c_str()); - transportChannel->AddRemoteCandidate(candidate); - } - } - }); -} - -void Connector::CandidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate) { - assert(networkThread->IsCurrent()); - - webrtc::JsepIceCandidate iceCandidate("", 0); - iceCandidate.SetCandidate(candidate); - std::string serializedCandidate; - if (iceCandidate.ToString(&serializedCandidate)) { - std::vector arrayOfOne; - arrayOfOne.push_back(serializedCandidate); - SignalCandidatesGathered(arrayOfOne); - - webrtc::JsepIceCandidate parseCandidate("", 0); - if (parseCandidate.Initialize(serializedCandidate, nullptr)) { - auto candidate = parseCandidate.candidate(); - - } - } -} - -void Connector::CandidateGatheringState(cricket::IceTransportInternal *transport) { - if (transport->gathering_state() == cricket::IceGatheringState::kIceGatheringComplete) { - /*if (collectedLocalCandidates.size() != 0) { - SignalCandidatesGathered(collectedLocalCandidates); - }*/ - } -} - -void Connector::TransportStateChanged(cricket::IceTransportInternal *transport) { - auto state = transport->GetIceTransportState(); - switch (state) { - case webrtc::IceTransportState::kConnected: - case webrtc::IceTransportState::kCompleted: - SignalReadyToSendStateChanged(true); - printf("===== State: Connected\n"); - break; - default: - SignalReadyToSendStateChanged(false); - printf("===== State: Disconnected\n"); - break; - } -} - -void Connector::TransportRoleConflict(cricket::IceTransportInternal *transport) { - printf("===== Role conflict\n"); -} - -void Connector::TransportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t ×tamp, __unused int unused) { - rtc::CopyOnWriteBuffer data; - data.AppendData(bytes, size); - SignalPacketReceived(data); -} - -void Connector::SendPacket(const rtc::CopyOnWriteBuffer& data) { - networkThread->Invoke(RTC_FROM_HERE, [this, data] { - rtc::PacketOptions options; - transportChannel->SendPacket((const char *)data.data(), data.size(), options, 0); - }); -} diff --git a/submodules/TgVoipWebrtc/Impl/Controller.h b/submodules/TgVoipWebrtc/Impl/Controller.h deleted file mode 100644 index ac22631d8e..0000000000 --- a/submodules/TgVoipWebrtc/Impl/Controller.h +++ /dev/null @@ -1,75 +0,0 @@ -#ifndef DEMO_CONTROLLER_H -#define DEMO_CONTROLLER_H - - -#include "Connector.h" -#include "MediaEngineWebrtc.h" - -#include "rtc_base/copy_on_write_buffer.h" -#include "rtc_base/socket_address.h" -#include "rtc_base/task_utils/repeating_task.h" -#include "rtc_base/third_party/sigslot/sigslot.h" - -#import "VideoMetalView.h" - -class Controller : public sigslot::has_slots<> { -public: - enum EndpointType { - UDP, - TCP, - P2P, - }; - - enum State { - Starting, - WaitInit, - WaitInitAck, - Established, - Failed, - Reconnecting, - }; - - explicit Controller(bool is_outgoing, size_t init_timeout, size_t reconnect_timeout); - ~Controller() override; - void Start(); - //void SetNetworkType(message::NetworkType network_type); - void SetDataSaving(bool data_saving); - void SetMute(bool mute); - void 
AttachVideoView(rtc::VideoSinkInterface *sink); - void SetProxy(rtc::ProxyType type, const rtc::SocketAddress& addr, const std::string& username, const std::string& password); - void AddRemoteCandidates(const std::vector &candidates); - - //static std::map network_params; - static MediaEngineWebrtc::NetworkParams default_network_params; - static MediaEngineWebrtc::NetworkParams datasaving_network_params; - sigslot::signal1 SignalNewState; - sigslot::signal1&> SignalCandidatesGathered; - -private: - std::unique_ptr thread; - std::unique_ptr connector; - std::unique_ptr media; - State state; - webrtc::RepeatingTaskHandle repeatable; - int64_t last_recv_time; - int64_t last_send_time; - const bool is_outgoing; - const size_t init_timeout; - const size_t reconnect_timeout; - bool local_datasaving; - bool final_datasaving; - //message::NetworkType local_network_type; - //message::NetworkType final_network_type; - - void PacketReceived(const rtc::CopyOnWriteBuffer &); - void WriteableStateChanged(bool); - void CandidatesGathered(const std::vector &); - void SetFail(); - void Play(const int16_t *data, size_t size); - void Record(int16_t *data, size_t size); - void SendRtp(rtc::CopyOnWriteBuffer packet); - //void UpdateNetworkParams(const message::RtpStream& rtp); -}; - - -#endif //DEMO_CONTROLLER_H diff --git a/submodules/TgVoipWebrtc/Impl/Controller.mm b/submodules/TgVoipWebrtc/Impl/Controller.mm deleted file mode 100644 index 8a1a0a1c3f..0000000000 --- a/submodules/TgVoipWebrtc/Impl/Controller.mm +++ /dev/null @@ -1,126 +0,0 @@ -#include "Controller.h" - -#include "modules/rtp_rtcp/source/rtp_utility.h" -#include "rtc_base/time_utils.h" -#include "rtc_base/message_handler.h" - -#include - -/*std::map Controller::network_params = { - {message::NetworkType::nGprs, {6, 8, 6, 120, false, false, false}}, - {message::NetworkType::nEdge, {6, 16, 6, 120, false, false, false}}, - {message::NetworkType::n3gOrAbove, {6, 32, 16, 60, false, false, false}}, -}; -MediaEngineWebrtc::NetworkParams Controller::default_network_params = {6, 32, 16, 30, false, false, false}; -MediaEngineWebrtc::NetworkParams Controller::datasaving_network_params = {6, 8, 6, 120, false, false, true};*/ - -Controller::Controller(bool is_outgoing, size_t init_timeout, size_t reconnect_timeout) -: thread(rtc::Thread::Create()) -, connector(std::make_unique(is_outgoing)) -, state(State::Starting) -, last_recv_time(rtc::TimeMillis()) -, last_send_time(rtc::TimeMillis()) -, is_outgoing(is_outgoing) -, init_timeout(init_timeout * 1000) -, reconnect_timeout(reconnect_timeout * 1000) -, local_datasaving(false) -, final_datasaving(false) -{ - connector->SignalReadyToSendStateChanged.connect(this, &Controller::WriteableStateChanged); - connector->SignalPacketReceived.connect(this, &Controller::PacketReceived); - connector->SignalCandidatesGathered.connect(this, &Controller::CandidatesGathered); - thread->Start(); - - thread->Invoke(RTC_FROM_HERE, [this, is_outgoing]() { - media.reset(new MediaEngineWebrtc(is_outgoing)); - media->Send.connect(this, &Controller::SendRtp); - }); -} - -Controller::~Controller() { - thread->Invoke(RTC_FROM_HERE, [this]() { - media = nullptr; - connector = nullptr; - }); -} - -void Controller::Start() { - last_recv_time = rtc::TimeMillis(); - connector->Start(); -} - -void Controller::PacketReceived(const rtc::CopyOnWriteBuffer &data) { - thread->PostTask(RTC_FROM_HERE, [this, data]() { - if (media) { - media->Receive(data); - } - }); -} - -void Controller::WriteableStateChanged(bool isWriteable) { - if 
(isWriteable) { - SignalNewState(State::Established); - } else { - SignalNewState(State::Reconnecting); - } - thread->PostTask(RTC_FROM_HERE, [this, isWriteable]() { - if (media) { - media->SetCanSendPackets(isWriteable); - } - }); -} - -void Controller::SendRtp(rtc::CopyOnWriteBuffer packet) { - connector->SendPacket(packet); -} - -/*void Controller::UpdateNetworkParams(const message::RtpStream& rtp) { - bool new_datasaving = local_datasaving || rtp.data_saving; - if (!new_datasaving) { - final_datasaving = false; - message::NetworkType new_network_type = std::min(local_network_type, rtp.network_type); - if (new_network_type != final_network_type) { - final_network_type = new_network_type; - auto it = network_params.find(rtp.network_type); - if (it == network_params.end()) - media->SetNetworkParams(default_network_params); - else - media->SetNetworkParams(it->second); - } - } else if (new_datasaving != final_datasaving) { - final_datasaving = true; - media->SetNetworkParams(datasaving_network_params); - } -}*/ - -void Controller::AttachVideoView(rtc::VideoSinkInterface *sink) { - thread->PostTask(RTC_FROM_HERE, [this, sink]() { - media->AttachVideoView(sink); - }); -} - -/*void Controller::SetNetworkType(message::NetworkType network_type) { - local_network_type = network_type; -}*/ - -void Controller::SetDataSaving(bool data_saving) { - local_datasaving = data_saving; -} - -void Controller::SetMute(bool mute) { - thread->Invoke(RTC_FROM_HERE, [this, mute]() { - if (media) - media->SetMute(mute); - }); -} - -void Controller::SetProxy(rtc::ProxyType type, const rtc::SocketAddress& addr, const std::string& username, const std::string& password) { -} - -void Controller::CandidatesGathered(const std::vector &candidates) { - SignalCandidatesGathered(candidates); -} - -void Controller::AddRemoteCandidates(const std::vector &candidates) { - connector->AddRemoteCandidates(candidates); -} diff --git a/submodules/TgVoipWebrtc/Impl/Manager.cpp b/submodules/TgVoipWebrtc/Impl/Manager.cpp new file mode 100644 index 0000000000..c7840430e9 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/Manager.cpp @@ -0,0 +1,134 @@ +#include "Manager.h" + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +static rtc::Thread *makeNetworkThread() { + static std::unique_ptr value = rtc::Thread::CreateWithSocketServer(); + value->SetName("WebRTC-Network", nullptr); + value->Start(); + return value.get(); +} + + +static rtc::Thread *getNetworkThread() { + static rtc::Thread *value = makeNetworkThread(); + return value; +} + +static rtc::Thread *makeMediaThread() { + static std::unique_ptr value = rtc::Thread::Create(); + value->SetName("WebRTC-Media", nullptr); + value->Start(); + return value.get(); +} + + +static rtc::Thread *getMediaThread() { + static rtc::Thread *value = makeMediaThread(); + return value; +} + +Manager::Manager( + rtc::Thread *thread, + TgVoipEncryptionKey encryptionKey, + bool enableP2P, + std::function stateUpdated, + std::function &)> signalingDataEmitted +) : +_thread(thread), +_encryptionKey(encryptionKey), +_enableP2P(enableP2P), +_stateUpdated(stateUpdated), +_signalingDataEmitted(signalingDataEmitted) { + assert(_thread->IsCurrent()); +} + +Manager::~Manager() { + assert(_thread->IsCurrent()); +} + +void Manager::start() { + auto weakThis = std::weak_ptr(shared_from_this()); + _networkManager.reset(new ThreadLocalObject(getNetworkThread(), [encryptionKey = _encryptionKey, enableP2P = _enableP2P, thread = _thread, weakThis, signalingDataEmitted = _signalingDataEmitted]() { + return 
new NetworkManager( + getNetworkThread(), + encryptionKey, + enableP2P, + [thread, weakThis](const NetworkManager::State &state) { + thread->PostTask(RTC_FROM_HERE, [weakThis, state]() { + auto strongThis = weakThis.lock(); + if (strongThis == nullptr) { + return; + } + TgVoipState mappedState; + if (state.isReadyToSendData) { + mappedState = TgVoipState::Estabilished; + } else { + mappedState = TgVoipState::Reconnecting; + } + strongThis->_stateUpdated(mappedState); + + strongThis->_mediaManager->perform([state](MediaManager *mediaManager) { + mediaManager->setIsConnected(state.isReadyToSendData); + }); + }); + }, + [thread, weakThis](const rtc::CopyOnWriteBuffer &packet) { + thread->PostTask(RTC_FROM_HERE, [weakThis, packet]() { + auto strongThis = weakThis.lock(); + if (strongThis == nullptr) { + return; + } + strongThis->_mediaManager->perform([packet](MediaManager *mediaManager) { + mediaManager->receivePacket(packet); + }); + }); + }, + [signalingDataEmitted](const std::vector &data) { + signalingDataEmitted(data); + } + ); + })); + bool isOutgoing = _encryptionKey.isOutgoing; + _mediaManager.reset(new ThreadLocalObject(getMediaThread(), [isOutgoing, thread = _thread, weakThis]() { + return new MediaManager( + getMediaThread(), + isOutgoing, + [thread, weakThis](const rtc::CopyOnWriteBuffer &packet) { + thread->PostTask(RTC_FROM_HERE, [weakThis, packet]() { + auto strongThis = weakThis.lock(); + if (strongThis == nullptr) { + return; + } + strongThis->_networkManager->perform([packet](NetworkManager *networkManager) { + networkManager->sendPacket(packet); + }); + }); + } + ); + })); +} + +void Manager::receiveSignalingData(const std::vector &data) { + _networkManager->perform([data](NetworkManager *networkManager) { + networkManager->receiveSignalingData(data); + }); +} + +void Manager::setIncomingVideoOutput(std::shared_ptr> sink) { + _mediaManager->perform([sink](MediaManager *mediaManager) { + mediaManager->setIncomingVideoOutput(sink); + }); +} + +void Manager::setOutgoingVideoOutput(std::shared_ptr> sink) { + _mediaManager->perform([sink](MediaManager *mediaManager) { + mediaManager->setOutgoingVideoOutput(sink); + }); +} + +#ifdef TGVOIP_NAMESPACE +} +#endif diff --git a/submodules/TgVoipWebrtc/Impl/Manager.h b/submodules/TgVoipWebrtc/Impl/Manager.h new file mode 100644 index 0000000000..cb4e637b70 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/Manager.h @@ -0,0 +1,45 @@ +#ifndef TGVOIP_WEBRTC_MANAGER_H +#define TGVOIP_WEBRTC_MANAGER_H + +#include "ThreadLocalObject.h" +#include "NetworkManager.h" +#include "MediaManager.h" +#include "TgVoip.h" + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +class Manager : public std::enable_shared_from_this { +public: + Manager( + rtc::Thread *thread, + TgVoipEncryptionKey encryptionKey, + bool enableP2P, + std::function stateUpdated, + std::function &)> signalingDataEmitted + ); + ~Manager(); + + void start(); + void receiveSignalingData(const std::vector &data); + void setIncomingVideoOutput(std::shared_ptr> sink); + void setOutgoingVideoOutput(std::shared_ptr> sink); + +private: + rtc::Thread *_thread; + TgVoipEncryptionKey _encryptionKey; + bool _enableP2P; + std::function _stateUpdated; + std::function &)> _signalingDataEmitted; + std::unique_ptr> _networkManager; + std::unique_ptr> _mediaManager; + +private: +}; + +#ifdef TGVOIP_NAMESPACE +} +#endif + +#endif diff --git a/submodules/TgVoipWebrtc/Impl/MediaEngineBase.h b/submodules/TgVoipWebrtc/Impl/MediaEngineBase.h deleted file mode 100644 index 
1c2eda6c1c..0000000000 --- a/submodules/TgVoipWebrtc/Impl/MediaEngineBase.h +++ /dev/null @@ -1,19 +0,0 @@ -#ifndef DEMO_MEDIAENGINEBASE_H -#define DEMO_MEDIAENGINEBASE_H - - -#include "rtc_base/copy_on_write_buffer.h" -#include "rtc_base/third_party/sigslot/sigslot.h" - -#include - -class MediaEngineBase { -public: - MediaEngineBase() = default; - virtual ~MediaEngineBase() = default; - - - virtual void Receive(rtc::CopyOnWriteBuffer) = 0; -}; - -#endif //DEMO_MEDIAENGINEBASE_H diff --git a/submodules/TgVoipWebrtc/Impl/MediaEngineWebrtc.h b/submodules/TgVoipWebrtc/Impl/MediaEngineWebrtc.h deleted file mode 100644 index 206717dd9a..0000000000 --- a/submodules/TgVoipWebrtc/Impl/MediaEngineWebrtc.h +++ /dev/null @@ -1,76 +0,0 @@ -#ifndef DEMO_MEDIAENGINEWEBRTC_H -#define DEMO_MEDIAENGINEWEBRTC_H - -#include "rtc_base/copy_on_write_buffer.h" -#include "rtc_base/third_party/sigslot/sigslot.h" - -#include "api/transport/field_trial_based_config.h" -#include "call/call.h" -#include "media/base/media_engine.h" -#include "pc/rtp_sender.h" -#include "rtc_base/task_queue.h" - -#include - -#import "VideoCameraCapturer.h" -#import "VideoMetalView.h" - -class MediaEngineWebrtc : public sigslot::has_slots<> { -public: - struct NetworkParams { - uint8_t min_bitrate_kbps; - uint8_t max_bitrate_kbps; - uint8_t start_bitrate_kbps; - uint8_t ptime_ms; - bool echo_cancellation; - bool auto_gain_control; - bool noise_suppression; - }; - - explicit MediaEngineWebrtc(bool outgoing); - ~MediaEngineWebrtc(); - void Receive(rtc::CopyOnWriteBuffer); - void OnSentPacket(const rtc::SentPacket& sent_packet); - void SetNetworkParams(const NetworkParams& params); - void SetMute(bool mute); - void SetCanSendPackets(bool); - void AttachVideoView(rtc::VideoSinkInterface *sink); - - sigslot::signal1 Send; - -private: - class Sender final : public cricket::MediaChannel::NetworkInterface { - public: - explicit Sender(MediaEngineWebrtc &engine, bool isVideo); - bool SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override; - bool SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override; - int SetOption(SocketType type, rtc::Socket::Option opt, int option) override; - - private: - MediaEngineWebrtc &engine; - bool isVideo; - }; - - const uint32_t ssrc_send; - const uint32_t ssrc_recv; - const uint32_t ssrc_send_video; - const uint32_t ssrc_recv_video; - std::unique_ptr call; - std::unique_ptr media_engine; - std::unique_ptr event_log; - std::unique_ptr task_queue_factory; - webrtc::FieldTrialBasedConfig field_trials; - webrtc::LocalAudioSinkAdapter audio_source; - Sender audio_sender; - Sender video_sender; - std::unique_ptr voice_channel; - std::unique_ptr video_channel; - std::unique_ptr video_bitrate_allocator_factory; - std::unique_ptr signaling_thread; - std::unique_ptr worker_thread; - rtc::scoped_refptr _nativeVideoSource; - VideoCameraCapturer *_videoCapturer; -}; - - -#endif //DEMO_MEDIAENGINEWEBRTC_H diff --git a/submodules/TgVoipWebrtc/Impl/MediaEngineWebrtc.mm b/submodules/TgVoipWebrtc/Impl/MediaEngineWebrtc.mm deleted file mode 100644 index 90775e8011..0000000000 --- a/submodules/TgVoipWebrtc/Impl/MediaEngineWebrtc.mm +++ /dev/null @@ -1,425 +0,0 @@ -#include "MediaEngineWebrtc.h" - -#include "absl/strings/match.h" -#include "api/audio_codecs/audio_decoder_factory_template.h" -#include "api/audio_codecs/audio_encoder_factory_template.h" -#include "api/audio_codecs/opus/audio_decoder_opus.h" -#include "api/audio_codecs/opus/audio_encoder_opus.h" -#include 
"api/rtp_parameters.h" -#include "api/task_queue/default_task_queue_factory.h" -#include "media/base/codec.h" -#include "media/base/media_constants.h" -#include "media/engine/webrtc_media_engine.h" -#include "modules/audio_device/include/audio_device_default.h" -#include "rtc_base/task_utils/repeating_task.h" -#include "system_wrappers/include/field_trial.h" -#include "api/video/builtin_video_bitrate_allocator_factory.h" -#include "api/video/video_bitrate_allocation.h" - -#include "sdk/objc/components/video_codec/RTCVideoEncoderFactoryH264.h" -#include "sdk/objc/components/video_codec/RTCVideoDecoderFactoryH264.h" -#include "sdk/objc/components/video_codec/RTCDefaultVideoEncoderFactory.h" -#include "sdk/objc/components/video_codec/RTCDefaultVideoDecoderFactory.h" -#include "sdk/objc/native/api/video_encoder_factory.h" -#include "sdk/objc/native/api/video_decoder_factory.h" - -#include "sdk/objc/native/src/objc_video_track_source.h" -#include "api/video_track_source_proxy.h" -#include "sdk/objc/api/RTCVideoRendererAdapter.h" -#include "sdk/objc/native/api/video_frame.h" -#include "api/media_types.h" - -namespace { -const size_t frame_samples = 480; -const uint8_t channels = 1; -const uint8_t sample_bytes = 2; -const uint32_t clockrate = 48000; -const uint16_t sdp_payload = 111; -const char* sdp_name = "opus"; -const uint8_t sdp_channels = 2; -const uint32_t sdp_bitrate = 0; -const uint32_t caller_ssrc = 1; -const uint32_t called_ssrc = 2; -const uint32_t caller_ssrc_video = 3; -const uint32_t called_ssrc_video = 4; -const int extension_sequence = 1; -const int extension_sequence_video = 1; -} - -static void AddDefaultFeedbackParams(cricket::VideoCodec* codec) { - // Don't add any feedback params for RED and ULPFEC. - if (codec->name == cricket::kRedCodecName || codec->name == cricket::kUlpfecCodecName) - return; - codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty)); - codec->AddFeedbackParam( - cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty)); - // Don't add any more feedback params for FLEXFEC. - if (codec->name == cricket::kFlexfecCodecName) - return; - codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir)); - codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kParamValueEmpty)); - codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kRtcpFbNackParamPli)); - if (codec->name == cricket::kVp8CodecName && - webrtc::field_trial::IsEnabled("WebRTC-RtcpLossNotification")) { - codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamLntf, cricket::kParamValueEmpty)); - } -} - -static std::vector AssignPayloadTypesAndDefaultCodecs(std::vector input_formats, int32_t &outCodecId) { - if (input_formats.empty()) - return std::vector(); - static const int kFirstDynamicPayloadType = 96; - static const int kLastDynamicPayloadType = 127; - int payload_type = kFirstDynamicPayloadType; - - input_formats.push_back(webrtc::SdpVideoFormat(cricket::kRedCodecName)); - input_formats.push_back(webrtc::SdpVideoFormat(cricket::kUlpfecCodecName)); - - /*if (IsFlexfecAdvertisedFieldTrialEnabled()) { - webrtc::SdpVideoFormat flexfec_format(kFlexfecCodecName); - // This value is currently arbitrarily set to 10 seconds. (The unit - // is microseconds.) This parameter MUST be present in the SDP, but - // we never use the actual value anywhere in our code however. 
- // TODO(brandtr): Consider honouring this value in the sender and receiver. - flexfec_format.parameters = {{kFlexfecFmtpRepairWindow, "10000000"}}; - input_formats.push_back(flexfec_format); - }*/ - - bool found = false; - bool useVP9 = true; - - std::vector output_codecs; - for (const webrtc::SdpVideoFormat& format : input_formats) { - cricket::VideoCodec codec(format); - codec.id = payload_type; - AddDefaultFeedbackParams(&codec); - output_codecs.push_back(codec); - - if (useVP9 && codec.name == cricket::kVp9CodecName) { - if (!found) { - outCodecId = codec.id; - found = true; - } - } - if (!useVP9 && codec.name == cricket::kH264CodecName) { - if (!found) { - outCodecId = codec.id; - found = true; - } - } - - // Increment payload type. - ++payload_type; - if (payload_type > kLastDynamicPayloadType) { - RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest."; - break; - } - - // Add associated RTX codec for non-FEC codecs. - if (!absl::EqualsIgnoreCase(codec.name, cricket::kUlpfecCodecName) && - !absl::EqualsIgnoreCase(codec.name, cricket::kFlexfecCodecName)) { - output_codecs.push_back( - cricket::VideoCodec::CreateRtxCodec(payload_type, codec.id)); - - // Increment payload type. - ++payload_type; - if (payload_type > kLastDynamicPayloadType) { - RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest."; - break; - } - } - } - return output_codecs; -} - -MediaEngineWebrtc::MediaEngineWebrtc(bool outgoing) -: ssrc_send(outgoing ? caller_ssrc : called_ssrc) -, ssrc_recv(outgoing ? called_ssrc : caller_ssrc) -, ssrc_send_video(outgoing ? caller_ssrc_video : called_ssrc_video) -, ssrc_recv_video(outgoing ? called_ssrc_video : caller_ssrc_video) -, event_log(std::make_unique()) -, task_queue_factory(webrtc::CreateDefaultTaskQueueFactory()) -, audio_sender(*this, false) -, video_sender(*this, true) -, signaling_thread(rtc::Thread::Create()) -, worker_thread(rtc::Thread::Create()) { - signaling_thread->Start(); - worker_thread->Start(); - - webrtc::field_trial::InitFieldTrialsFromString( - "WebRTC-Audio-SendSideBwe/Enabled/" - "WebRTC-Audio-Allocation/min:6kbps,max:32kbps/" - "WebRTC-Audio-OpusMinPacketLossRate/Enabled-1/" - ); - video_bitrate_allocator_factory = webrtc::CreateBuiltinVideoBitrateAllocatorFactory(); - cricket::MediaEngineDependencies media_deps; - media_deps.task_queue_factory = task_queue_factory.get(); - media_deps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory(); - media_deps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory(); - - //auto video_encoder_factory = webrtc::ObjCToNativeVideoEncoderFactory([[RTCVideoEncoderFactoryH264 alloc] init]); - auto video_encoder_factory = webrtc::ObjCToNativeVideoEncoderFactory([[RTCDefaultVideoEncoderFactory alloc] init]); - int32_t outCodecId = 96; - std::vector videoCodecs = AssignPayloadTypesAndDefaultCodecs(video_encoder_factory->GetSupportedFormats(), outCodecId); - - media_deps.video_encoder_factory = webrtc::ObjCToNativeVideoEncoderFactory([[RTCDefaultVideoEncoderFactory alloc] init]); - media_deps.video_decoder_factory = webrtc::ObjCToNativeVideoDecoderFactory([[RTCDefaultVideoDecoderFactory alloc] init]); - - media_deps.audio_processing = webrtc::AudioProcessingBuilder().Create(); - media_engine = cricket::CreateMediaEngine(std::move(media_deps)); - media_engine->Init(); - webrtc::Call::Config call_config(event_log.get()); - call_config.task_queue_factory = task_queue_factory.get(); - call_config.trials = &field_trials; - call_config.audio_state = 
media_engine->voice().GetAudioState(); - call.reset(webrtc::Call::Create(call_config)); - voice_channel.reset(media_engine->voice().CreateMediaChannel( - call.get(), cricket::MediaConfig(), cricket::AudioOptions(), webrtc::CryptoOptions::NoGcm())); - video_channel.reset(media_engine->video().CreateMediaChannel(call.get(), cricket::MediaConfig(), cricket::VideoOptions(), webrtc::CryptoOptions::NoGcm(), video_bitrate_allocator_factory.get())); - - if (true) { - voice_channel->AddSendStream(cricket::StreamParams::CreateLegacy(ssrc_send)); - SetNetworkParams({6, 32, 6, 120, false, false, false}); - SetMute(false); - voice_channel->SetInterface(&audio_sender, webrtc::MediaTransportConfig()); - } - - if (true) { - video_channel->AddSendStream(cricket::StreamParams::CreateLegacy(ssrc_send_video)); - - for (auto codec : videoCodecs) { - if (codec.id == outCodecId) { - rtc::scoped_refptr objCVideoTrackSource(new rtc::RefCountedObject()); - _nativeVideoSource = webrtc::VideoTrackSourceProxy::Create(signaling_thread.get(), worker_thread.get(), objCVideoTrackSource); - - codec.SetParam(cricket::kCodecParamMinBitrate, 64); - codec.SetParam(cricket::kCodecParamStartBitrate, 256); - codec.SetParam(cricket::kCodecParamMaxBitrate, 2500); - - dispatch_async(dispatch_get_main_queue(), ^{ -#if TARGET_IPHONE_SIMULATOR -#else - _videoCapturer = [[VideoCameraCapturer alloc] initWithSource:_nativeVideoSource]; - - AVCaptureDevice *frontCamera = nil; - for (AVCaptureDevice *device in [VideoCameraCapturer captureDevices]) { - if (device.position == AVCaptureDevicePositionFront) { - frontCamera = device; - break; - } - } - - if (frontCamera == nil) { - assert(false); - return; - } - - NSArray *sortedFormats = [[VideoCameraCapturer supportedFormatsForDevice:frontCamera] sortedArrayUsingComparator:^NSComparisonResult(AVCaptureDeviceFormat* lhs, AVCaptureDeviceFormat *rhs) { - int32_t width1 = CMVideoFormatDescriptionGetDimensions(lhs.formatDescription).width; - int32_t width2 = CMVideoFormatDescriptionGetDimensions(rhs.formatDescription).width; - return width1 < width2 ? 
NSOrderedAscending : NSOrderedDescending; - }]; - - AVCaptureDeviceFormat *bestFormat = nil; - for (AVCaptureDeviceFormat *format in sortedFormats) { - CMVideoDimensions dimensions = CMVideoFormatDescriptionGetDimensions(format.formatDescription); - if (dimensions.width >= 1000 || dimensions.height >= 1000) { - bestFormat = format; - break; - } - } - - if (bestFormat == nil) { - assert(false); - return; - } - - AVFrameRateRange *frameRateRange = [[bestFormat.videoSupportedFrameRateRanges sortedArrayUsingComparator:^NSComparisonResult(AVFrameRateRange *lhs, AVFrameRateRange *rhs) { - if (lhs.maxFrameRate < rhs.maxFrameRate) { - return NSOrderedAscending; - } else { - return NSOrderedDescending; - } - }] lastObject]; - - if (frameRateRange == nil) { - assert(false); - return; - } - - [_videoCapturer startCaptureWithDevice:frontCamera format:bestFormat fps:27]; -#endif - }); - - cricket::VideoSendParameters send_parameters; - send_parameters.codecs.push_back(codec); - send_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence_video); - //send_parameters.options.echo_cancellation = params.echo_cancellation; - //send_parameters.options.noise_suppression = params.noise_suppression; - //send_parameters.options.auto_gain_control = params.auto_gain_control; - //send_parameters.options.highpass_filter = false; - //send_parameters.options.typing_detection = false; - //send_parameters.max_bandwidth_bps = 800000; - //send_parameters.rtcp.reduced_size = true; - send_parameters.rtcp.remote_estimate = true; - video_channel->SetSendParameters(send_parameters); - - video_channel->SetVideoSend(ssrc_send_video, NULL, _nativeVideoSource.get()); - - video_channel->SetInterface(&video_sender, webrtc::MediaTransportConfig()); - - break; - } - } - } - if (true) { - cricket::AudioRecvParameters recv_parameters; - recv_parameters.codecs.emplace_back(sdp_payload, sdp_name, clockrate, sdp_bitrate, sdp_channels); - recv_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence); - recv_parameters.rtcp.reduced_size = true; - recv_parameters.rtcp.remote_estimate = true; - voice_channel->AddRecvStream(cricket::StreamParams::CreateLegacy(ssrc_recv)); - voice_channel->SetRecvParameters(recv_parameters); - voice_channel->SetPlayout(true); - } - if (true) { - for (auto codec : videoCodecs) { - if (codec.id == outCodecId) { - codec.SetParam(cricket::kCodecParamMinBitrate, 32); - codec.SetParam(cricket::kCodecParamStartBitrate, 300); - codec.SetParam(cricket::kCodecParamMaxBitrate, 1000); - - cricket::VideoRecvParameters recv_parameters; - recv_parameters.codecs.emplace_back(codec); - recv_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence_video); - //recv_parameters.rtcp.reduced_size = true; - recv_parameters.rtcp.remote_estimate = true; - video_channel->AddRecvStream(cricket::StreamParams::CreateLegacy(ssrc_recv_video)); - video_channel->SetRecvParameters(recv_parameters); - - break; - } - } - } -} - -MediaEngineWebrtc::~MediaEngineWebrtc() { - [_videoCapturer stopCapture]; - video_channel->SetSink(ssrc_recv_video, nullptr); - video_channel->RemoveSendStream(ssrc_send_video); - video_channel->RemoveRecvStream(ssrc_recv_video); - - voice_channel->SetPlayout(false); - voice_channel->RemoveSendStream(ssrc_send); - voice_channel->RemoveRecvStream(ssrc_recv); -}; - -void MediaEngineWebrtc::Receive(rtc::CopyOnWriteBuffer packet) { - if (packet.size() < 1) { - return; - } - - uint8_t 
header = ((uint8_t *)packet.data())[0]; - rtc::CopyOnWriteBuffer unwrappedPacket = packet.Slice(1, packet.size() - 1); - - if (header == 0xba) { - if (voice_channel) { - voice_channel->OnPacketReceived(unwrappedPacket, -1); - } - } else if (header == 0xbf) { - if (video_channel) { - video_channel->OnPacketReceived(unwrappedPacket, -1); - } - } else { - printf("----- Unknown packet header"); - } -} - -void MediaEngineWebrtc::OnSentPacket(const rtc::SentPacket& sent_packet) { - call->OnSentPacket(sent_packet); -} - -void MediaEngineWebrtc::SetNetworkParams(const MediaEngineWebrtc::NetworkParams& params) { - cricket::AudioCodec opus_codec(sdp_payload, sdp_name, clockrate, sdp_bitrate, sdp_channels); - opus_codec.AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc)); - opus_codec.SetParam(cricket::kCodecParamMinBitrate, params.min_bitrate_kbps); - opus_codec.SetParam(cricket::kCodecParamStartBitrate, params.start_bitrate_kbps); - opus_codec.SetParam(cricket::kCodecParamMaxBitrate, params.max_bitrate_kbps); - opus_codec.SetParam(cricket::kCodecParamUseInbandFec, 1); - opus_codec.SetParam(cricket::kCodecParamPTime, params.ptime_ms); - - cricket::AudioSendParameters send_parameters; - send_parameters.codecs.push_back(opus_codec); - send_parameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extension_sequence); - send_parameters.options.echo_cancellation = params.echo_cancellation; -// send_parameters.options.experimental_ns = false; - send_parameters.options.noise_suppression = params.noise_suppression; - send_parameters.options.auto_gain_control = params.auto_gain_control; - send_parameters.options.highpass_filter = false; - send_parameters.options.typing_detection = false; -// send_parameters.max_bandwidth_bps = 16000; - send_parameters.rtcp.reduced_size = true; - send_parameters.rtcp.remote_estimate = true; - voice_channel->SetSendParameters(send_parameters); -} - -void MediaEngineWebrtc::SetMute(bool mute) { - -} - -void MediaEngineWebrtc::SetCanSendPackets(bool canSendPackets) { - if (canSendPackets) { - call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkUp); - call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkUp); - } else { - call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkDown); - call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkDown); - } - if (voice_channel) { - voice_channel->OnReadyToSend(canSendPackets); - voice_channel->SetSend(canSendPackets); - voice_channel->SetAudioSend(ssrc_send, true, nullptr, &audio_source); - } - if (video_channel) { - video_channel->OnReadyToSend(canSendPackets); - video_channel->SetSend(canSendPackets); - } -} - -void MediaEngineWebrtc::AttachVideoView(rtc::VideoSinkInterface *sink) { - video_channel->SetSink(ssrc_recv_video, sink); -} - -bool MediaEngineWebrtc::Sender::SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) { - rtc::CopyOnWriteBuffer wrappedPacket; - uint8_t header = isVideo ? 0xbf : 0xba; - wrappedPacket.AppendData(&header, 1); - wrappedPacket.AppendData(*packet); - - engine.Send(wrappedPacket); - rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent); - engine.OnSentPacket(sent_packet); - return true; -} - -bool MediaEngineWebrtc::Sender::SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) { - rtc::CopyOnWriteBuffer wrappedPacket; - uint8_t header = isVideo ? 
0xbf : 0xba; - wrappedPacket.AppendData(&header, 1); - wrappedPacket.AppendData(*packet); - - engine.Send(wrappedPacket); - rtc::SentPacket sent_packet(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent); - engine.OnSentPacket(sent_packet); - return true; -} - -int MediaEngineWebrtc::Sender::SetOption(cricket::MediaChannel::NetworkInterface::SocketType, rtc::Socket::Option, int) { - return -1; // in general, the result is not important yet -} - -MediaEngineWebrtc::Sender::Sender(MediaEngineWebrtc &engine, bool isVideo) : -engine(engine), -isVideo(isVideo) { - -} diff --git a/submodules/TgVoipWebrtc/Impl/MediaManager.cpp b/submodules/TgVoipWebrtc/Impl/MediaManager.cpp new file mode 100644 index 0000000000..ea606218d3 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/MediaManager.cpp @@ -0,0 +1,434 @@ +#include "MediaManager.h" + +#include "absl/strings/match.h" +#include "api/audio_codecs/audio_decoder_factory_template.h" +#include "api/audio_codecs/audio_encoder_factory_template.h" +#include "api/audio_codecs/opus/audio_decoder_opus.h" +#include "api/audio_codecs/opus/audio_encoder_opus.h" +#include "api/rtp_parameters.h" +#include "api/task_queue/default_task_queue_factory.h" +#include "media/base/codec.h" +#include "media/base/media_constants.h" +#include "media/engine/webrtc_media_engine.h" +#include "modules/audio_device/include/audio_device_default.h" +#include "rtc_base/task_utils/repeating_task.h" +#include "system_wrappers/include/field_trial.h" +#include "api/video/builtin_video_bitrate_allocator_factory.h" +#include "api/video/video_bitrate_allocation.h" +#include "call/call.h" + +#if TARGET_OS_IPHONE + +#include "CodecsApple.h" + +#else +#error "Unsupported platform" +#endif + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +static const uint32_t ssrcAudioIncoming = 1; +static const uint32_t ssrcAudioOutgoing = 2; +static const uint32_t ssrcAudioFecIncoming = 5; +static const uint32_t ssrcAudioFecOutgoing = 6; +static const uint32_t ssrcVideoIncoming = 3; +static const uint32_t ssrcVideoOutgoing = 4; +static const uint32_t ssrcVideoFecIncoming = 7; +static const uint32_t ssrcVideoFecOutgoing = 8; + +static void AddDefaultFeedbackParams(cricket::VideoCodec *codec) { + // Don't add any feedback params for RED and ULPFEC. + if (codec->name == cricket::kRedCodecName || codec->name == cricket::kUlpfecCodecName) + return; + codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamRemb, cricket::kParamValueEmpty)); + codec->AddFeedbackParam( + cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc, cricket::kParamValueEmpty)); + // Don't add any more feedback params for FLEXFEC. 
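// NOTE: for reference on the feedback parameters this helper attaches: REMB and
// transport-cc (above) enable receiver- and transport-side bandwidth estimation,
// while CCM FIR and NACK / NACK-PLI (below) let the receiver request full intra
// frames, retransmissions and picture-loss recovery. RED, ULPFEC and FlexFEC are
// excluded because they are themselves repair streams, so the usual feedback
// messages are not advertised for them.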
+ if (codec->name == cricket::kFlexfecCodecName) + return; + codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamCcm, cricket::kRtcpFbCcmParamFir)); + codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kParamValueEmpty)); + codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamNack, cricket::kRtcpFbNackParamPli)); + if (codec->name == cricket::kVp8CodecName && + webrtc::field_trial::IsEnabled("WebRTC-RtcpLossNotification")) { + codec->AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamLntf, cricket::kParamValueEmpty)); + } +} + +static std::vector AssignPayloadTypesAndDefaultCodecs(std::vector input_formats) { + if (input_formats.empty()) + return std::vector(); + static const int kFirstDynamicPayloadType = 96; + static const int kLastDynamicPayloadType = 127; + int payload_type = kFirstDynamicPayloadType; + + input_formats.push_back(webrtc::SdpVideoFormat(cricket::kRedCodecName)); + input_formats.push_back(webrtc::SdpVideoFormat(cricket::kUlpfecCodecName)); + + if (true) { + webrtc::SdpVideoFormat flexfec_format(cricket::kFlexfecCodecName); + // This value is currently arbitrarily set to 10 seconds. (The unit + // is microseconds.) This parameter MUST be present in the SDP, but + // we never use the actual value anywhere in our code however. + // TODO(brandtr): Consider honouring this value in the sender and receiver. + flexfec_format.parameters = {{cricket::kFlexfecFmtpRepairWindow, "10000000"}}; + input_formats.push_back(flexfec_format); + } + + std::vector output_codecs; + for (const webrtc::SdpVideoFormat& format : input_formats) { + cricket::VideoCodec codec(format); + codec.id = payload_type; + AddDefaultFeedbackParams(&codec); + output_codecs.push_back(codec); + + // Increment payload type. + ++payload_type; + if (payload_type > kLastDynamicPayloadType) { + RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest."; + break; + } + + // Add associated RTX codec for non-FEC codecs. + if (!absl::EqualsIgnoreCase(codec.name, cricket::kUlpfecCodecName) && + !absl::EqualsIgnoreCase(codec.name, cricket::kFlexfecCodecName)) { + output_codecs.push_back( + cricket::VideoCodec::CreateRtxCodec(payload_type, codec.id)); + + // Increment payload type. + ++payload_type; + if (payload_type > kLastDynamicPayloadType) { + RTC_LOG(LS_ERROR) << "Out of dynamic payload types, skipping the rest."; + break; + } + } + } + return output_codecs; +} + +static absl::optional selectVideoCodec(std::vector &codecs) { + bool useVP9 = false; + bool useH265 = true; + + for (auto &codec : codecs) { + if (useVP9) { + if (codec.name == cricket::kVp9CodecName) { + return absl::optional(codec); + } + } else if (useH265) { + if (codec.name == cricket::kH265CodecName) { + return absl::optional(codec); + } + } else { + if (codec.name == cricket::kH264CodecName) { + return absl::optional(codec); + } + } + } + + return absl::optional(); +} + +static rtc::Thread *makeWorkerThread() { + static std::unique_ptr value = rtc::Thread::Create(); + value->SetName("WebRTC-Worker", nullptr); + value->Start(); + return value.get(); +} + + +static rtc::Thread *getWorkerThread() { + static rtc::Thread *value = makeWorkerThread(); + return value; +} + +MediaManager::MediaManager( + rtc::Thread *thread, + bool isOutgoing, + std::function packetEmitted +) : +_packetEmitted(packetEmitted), +_thread(thread), +_eventLog(std::make_unique()), +_taskQueueFactory(webrtc::CreateDefaultTaskQueueFactory()) { + _ssrcAudio.incoming = isOutgoing ? 
ssrcAudioIncoming : ssrcAudioOutgoing; + _ssrcAudio.outgoing = (!isOutgoing) ? ssrcAudioIncoming : ssrcAudioOutgoing; + _ssrcAudio.fecIncoming = isOutgoing ? ssrcAudioFecIncoming : ssrcAudioFecOutgoing; + _ssrcAudio.fecOutgoing = (!isOutgoing) ? ssrcAudioFecIncoming : ssrcAudioFecOutgoing; + _ssrcVideo.incoming = isOutgoing ? ssrcVideoIncoming : ssrcVideoOutgoing; + _ssrcVideo.outgoing = (!isOutgoing) ? ssrcVideoIncoming : ssrcVideoOutgoing; + _ssrcVideo.fecIncoming = isOutgoing ? ssrcVideoFecIncoming : ssrcVideoFecOutgoing; + _ssrcVideo.fecOutgoing = (!isOutgoing) ? ssrcVideoFecIncoming : ssrcVideoFecOutgoing; + + _audioNetworkInterface = std::unique_ptr(new MediaManager::NetworkInterfaceImpl(this, false)); + _videoNetworkInterface = std::unique_ptr(new MediaManager::NetworkInterfaceImpl(this, true)); + + webrtc::field_trial::InitFieldTrialsFromString( + "WebRTC-Audio-SendSideBwe/Enabled/" + "WebRTC-Audio-Allocation/min:6kbps,max:32kbps/" + "WebRTC-Audio-OpusMinPacketLossRate/Enabled-1/" + "WebRTC-FlexFEC-03/Enabled/" + "WebRTC-FlexFEC-03-Advertised/Enabled/" + ); + + configurePlatformAudio(); + + _videoBitrateAllocatorFactory = webrtc::CreateBuiltinVideoBitrateAllocatorFactory(); + + cricket::MediaEngineDependencies mediaDeps; + mediaDeps.task_queue_factory = _taskQueueFactory.get(); + mediaDeps.audio_encoder_factory = webrtc::CreateAudioEncoderFactory(); + mediaDeps.audio_decoder_factory = webrtc::CreateAudioDecoderFactory(); + + auto videoEncoderFactory = makeVideoEncoderFactory(); + std::vector videoCodecs = AssignPayloadTypesAndDefaultCodecs(videoEncoderFactory->GetSupportedFormats()); + + mediaDeps.video_encoder_factory = makeVideoEncoderFactory(); + mediaDeps.video_decoder_factory = makeVideoDecoderFactory(); + + mediaDeps.audio_processing = webrtc::AudioProcessingBuilder().Create(); + _mediaEngine = cricket::CreateMediaEngine(std::move(mediaDeps)); + _mediaEngine->Init(); + webrtc::Call::Config callConfig(_eventLog.get()); + callConfig.task_queue_factory = _taskQueueFactory.get(); + callConfig.trials = &_fieldTrials; + callConfig.audio_state = _mediaEngine->voice().GetAudioState(); + _call.reset(webrtc::Call::Create(callConfig)); + _audioChannel.reset(_mediaEngine->voice().CreateMediaChannel(_call.get(), cricket::MediaConfig(), cricket::AudioOptions(), webrtc::CryptoOptions::NoGcm())); + _videoChannel.reset(_mediaEngine->video().CreateMediaChannel(_call.get(), cricket::MediaConfig(), cricket::VideoOptions(), webrtc::CryptoOptions::NoGcm(), _videoBitrateAllocatorFactory.get())); + + _audioChannel->AddSendStream(cricket::StreamParams::CreateLegacy(_ssrcAudio.outgoing)); + + const uint32_t opusClockrate = 48000; + const uint16_t opusSdpPayload = 111; + const char *opusSdpName = "opus"; + const uint8_t opusSdpChannels = 2; + const uint32_t opusSdpBitrate = 0; + + const uint8_t opusMinBitrateKbps = 6; + const uint8_t opusMaxBitrateKbps = 32; + const uint8_t opusStartBitrateKbps = 6; + const uint8_t opusPTimeMs = 120; + const int extensionSequenceOne = 1; + + cricket::AudioCodec opusCodec(opusSdpPayload, opusSdpName, opusClockrate, opusSdpBitrate, opusSdpChannels); + opusCodec.AddFeedbackParam(cricket::FeedbackParam(cricket::kRtcpFbParamTransportCc)); + opusCodec.SetParam(cricket::kCodecParamMinBitrate, opusMinBitrateKbps); + opusCodec.SetParam(cricket::kCodecParamStartBitrate, opusStartBitrateKbps); + opusCodec.SetParam(cricket::kCodecParamMaxBitrate, opusMaxBitrateKbps); + opusCodec.SetParam(cricket::kCodecParamUseInbandFec, 1); + opusCodec.SetParam(cricket::kCodecParamPTime, 
opusPTimeMs); + + cricket::AudioSendParameters audioSendPrameters; + audioSendPrameters.codecs.push_back(opusCodec); + audioSendPrameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne); + audioSendPrameters.options.echo_cancellation = false; + //audioSendPrameters.options.experimental_ns = false; + audioSendPrameters.options.noise_suppression = false; + audioSendPrameters.options.auto_gain_control = false; + audioSendPrameters.options.highpass_filter = false; + audioSendPrameters.options.typing_detection = false; + //audioSendPrameters.max_bandwidth_bps = 16000; + audioSendPrameters.rtcp.reduced_size = true; + audioSendPrameters.rtcp.remote_estimate = true; + _audioChannel->SetSendParameters(audioSendPrameters); + _audioChannel->SetInterface(_audioNetworkInterface.get(), webrtc::MediaTransportConfig()); + + cricket::AudioRecvParameters audioRecvParameters; + audioRecvParameters.codecs.emplace_back(opusSdpPayload, opusSdpName, opusClockrate, opusSdpBitrate, opusSdpChannels); + audioRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne); + audioRecvParameters.rtcp.reduced_size = true; + audioRecvParameters.rtcp.remote_estimate = true; + + _audioChannel->SetRecvParameters(audioRecvParameters); + _audioChannel->AddRecvStream(cricket::StreamParams::CreateLegacy(_ssrcAudio.incoming)); + _audioChannel->SetPlayout(true); + + cricket::StreamParams videoSendStreamParams; + cricket::SsrcGroup videoSendSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.outgoing, _ssrcVideo.fecOutgoing}); + videoSendStreamParams.ssrcs = {_ssrcVideo.outgoing}; + videoSendStreamParams.ssrc_groups.push_back(videoSendSsrcGroup); + videoSendStreamParams.cname = "cname"; + _videoChannel->AddSendStream(videoSendStreamParams); + + auto videoCodec = selectVideoCodec(videoCodecs); + if (videoCodec.has_value()) { + _nativeVideoSource = makeVideoSource(_thread, getWorkerThread()); + + auto codec = videoCodec.value(); + + codec.SetParam(cricket::kCodecParamMinBitrate, 64); + codec.SetParam(cricket::kCodecParamStartBitrate, 512); + codec.SetParam(cricket::kCodecParamMaxBitrate, 2500); + + _videoCapturer = makeVideoCapturer(_nativeVideoSource); + + cricket::VideoSendParameters videoSendParameters; + videoSendParameters.codecs.push_back(codec); + + for (auto &c : videoCodecs) { + if (c.name == cricket::kFlexfecCodecName) { + videoSendParameters.codecs.push_back(c); + break; + } + } + + videoSendParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne); + //send_parameters.max_bandwidth_bps = 800000; + //send_parameters.rtcp.reduced_size = true; + //videoSendParameters.rtcp.remote_estimate = true; + _videoChannel->SetSendParameters(videoSendParameters); + + _videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, _nativeVideoSource.get()); + _videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr); + + _videoChannel->SetInterface(_videoNetworkInterface.get(), webrtc::MediaTransportConfig()); + + cricket::VideoRecvParameters videoRecvParameters; + videoRecvParameters.codecs.emplace_back(codec); + + for (auto &c : videoCodecs) { + if (c.name == cricket::kFlexfecCodecName) { + videoRecvParameters.codecs.push_back(c); + break; + } + } + + videoRecvParameters.extensions.emplace_back(webrtc::RtpExtension::kTransportSequenceNumberUri, extensionSequenceOne); + //recv_parameters.rtcp.reduced_size = true; + videoRecvParameters.rtcp.remote_estimate = true; + + 
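// NOTE: the "FEC-FR" ssrc-groups built here (send side above, receive side just
// below) pair each video SSRC with its FlexFEC repair SSRC, the same binding an
// SDP negotiation would express as "a=ssrc-group:FEC-FR <media> <fec>". Because
// this path constructs the streams directly rather than through SDP, the pairing
// has to be declared explicitly on both the send and receive StreamParams.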
cricket::StreamParams videoRecvStreamParams; + cricket::SsrcGroup videoRecvSsrcGroup(cricket::kFecFrSsrcGroupSemantics, {_ssrcVideo.incoming, _ssrcVideo.fecIncoming}); + videoRecvStreamParams.ssrcs = {_ssrcVideo.incoming}; + videoRecvStreamParams.ssrc_groups.push_back(videoRecvSsrcGroup); + videoRecvStreamParams.cname = "cname"; + + _videoChannel->AddRecvStream(videoRecvStreamParams); + _videoChannel->SetRecvParameters(videoRecvParameters); + + /*webrtc::FlexfecReceiveStream::Config config(_videoNetworkInterface.get()); + config.payload_type = 118; + config.protected_media_ssrcs = {1324234}; + webrtc::FlexfecReceiveStream* stream; + std::list streams;*/ + } +} + +MediaManager::~MediaManager() { + assert(_thread->IsCurrent()); + + _call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkDown); + _call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkDown); + + _audioChannel->OnReadyToSend(false); + _audioChannel->SetSend(false); + _audioChannel->SetAudioSend(_ssrcAudio.outgoing, false, nullptr, &_audioSource); + + _audioChannel->SetPlayout(false); + + _audioChannel->RemoveRecvStream(_ssrcAudio.incoming); + _audioChannel->RemoveSendStream(_ssrcAudio.outgoing); + + _audioChannel->SetInterface(nullptr, webrtc::MediaTransportConfig()); + + _videoChannel->RemoveRecvStream(_ssrcVideo.incoming); + _videoChannel->RemoveRecvStream(_ssrcVideo.fecIncoming); + _videoChannel->RemoveSendStream(_ssrcVideo.outgoing); + _videoChannel->RemoveSendStream(_ssrcVideo.fecOutgoing); + + _videoChannel->SetVideoSend(_ssrcVideo.outgoing, NULL, nullptr); + _videoChannel->SetVideoSend(_ssrcVideo.fecOutgoing, NULL, nullptr); + _videoChannel->SetInterface(nullptr, webrtc::MediaTransportConfig()); +} + +void MediaManager::setIsConnected(bool isConnected) { + if (isConnected) { + _call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkUp); + _call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkUp); + } else { + _call->SignalChannelNetworkState(webrtc::MediaType::AUDIO, webrtc::kNetworkDown); + _call->SignalChannelNetworkState(webrtc::MediaType::VIDEO, webrtc::kNetworkDown); + } + if (_audioChannel) { + _audioChannel->OnReadyToSend(isConnected); + _audioChannel->SetSend(isConnected); + _audioChannel->SetAudioSend(_ssrcAudio.outgoing, isConnected, nullptr, &_audioSource); + } + if (_videoChannel) { + _videoChannel->OnReadyToSend(isConnected); + _videoChannel->SetSend(isConnected); + } +} + +void MediaManager::receivePacket(const rtc::CopyOnWriteBuffer &packet) { + if (packet.size() < 1) { + return; + } + + uint8_t header = ((uint8_t *)packet.data())[0]; + rtc::CopyOnWriteBuffer unwrappedPacket = packet.Slice(1, packet.size() - 1); + + if (header == 0xba) { + if (_audioChannel) { + _audioChannel->OnPacketReceived(unwrappedPacket, -1); + } + } else if (header == 0xbf) { + if (_videoChannel) { + _videoChannel->OnPacketReceived(unwrappedPacket, -1); + } + } +} + +void MediaManager::notifyPacketSent(const rtc::SentPacket &sentPacket) { + _call->OnSentPacket(sentPacket); +} + +void MediaManager::setIncomingVideoOutput(std::shared_ptr> sink) { + _currentIncomingVideoSink = sink; + _videoChannel->SetSink(_ssrcVideo.incoming, sink.get()); +} + +void MediaManager::setOutgoingVideoOutput(std::shared_ptr> sink) { + _currentOutgoingVideoSink = sink; + _nativeVideoSource->AddOrUpdateSink(sink.get(), rtc::VideoSinkWants()); +} + +MediaManager::NetworkInterfaceImpl::NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo) : +_mediaManager(mediaManager), 
+_isVideo(isVideo) { +} + +bool MediaManager::NetworkInterfaceImpl::SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) { + rtc::CopyOnWriteBuffer wrappedPacket; + uint8_t header = _isVideo ? 0xbf : 0xba; + wrappedPacket.AppendData(&header, 1); + wrappedPacket.AppendData(*packet); + + _mediaManager->_packetEmitted(wrappedPacket); + rtc::SentPacket sentPacket(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent); + _mediaManager->notifyPacketSent(sentPacket); + return true; +} + +bool MediaManager::NetworkInterfaceImpl::SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) { + rtc::CopyOnWriteBuffer wrappedPacket; + uint8_t header = _isVideo ? 0xbf : 0xba; + wrappedPacket.AppendData(&header, 1); + wrappedPacket.AppendData(*packet); + + _mediaManager->_packetEmitted(wrappedPacket); + rtc::SentPacket sentPacket(options.packet_id, rtc::TimeMillis(), options.info_signaled_after_sent); + _mediaManager->notifyPacketSent(sentPacket); + return true; +} + +int MediaManager::NetworkInterfaceImpl::SetOption(cricket::MediaChannel::NetworkInterface::SocketType, rtc::Socket::Option, int) { + return -1; +} + +#ifdef TGVOIP_NAMESPACE +} +#endif diff --git a/submodules/TgVoipWebrtc/Impl/MediaManager.h b/submodules/TgVoipWebrtc/Impl/MediaManager.h new file mode 100644 index 0000000000..7bbb243308 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/MediaManager.h @@ -0,0 +1,101 @@ +#ifndef TGVOIP_WEBRTC_MEDIA_MANAGER_H +#define TGVOIP_WEBRTC_MEDIA_MANAGER_H + +#include "rtc_base/thread.h" +#include "rtc_base/copy_on_write_buffer.h" +#include "rtc_base/third_party/sigslot/sigslot.h" +#include "api/transport/field_trial_based_config.h" +#include "pc/rtp_sender.h" + +#include +#include + +namespace webrtc { +class Call; +class RtcEventLogNull; +class TaskQueueFactory; +class VideoBitrateAllocatorFactory; +class VideoTrackSourceInterface; +}; + +namespace cricket { +class MediaEngineInterface; +class VoiceMediaChannel; +class VideoMediaChannel; +}; + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +class VideoCapturerInterface; + +class MediaManager : public sigslot::has_slots<>, public std::enable_shared_from_this { +private: + struct SSRC { + uint32_t incoming; + uint32_t outgoing; + uint32_t fecIncoming; + uint32_t fecOutgoing; + }; + + class NetworkInterfaceImpl : public cricket::MediaChannel::NetworkInterface { + public: + NetworkInterfaceImpl(MediaManager *mediaManager, bool isVideo); + bool SendPacket(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override; + bool SendRtcp(rtc::CopyOnWriteBuffer *packet, const rtc::PacketOptions& options) override; + int SetOption(SocketType type, rtc::Socket::Option opt, int option) override; + + private: + MediaManager *_mediaManager; + bool _isVideo; + }; + + friend class MediaManager::NetworkInterfaceImpl; + +public: + MediaManager( + rtc::Thread *thread, + bool isOutgoing, + std::function packetEmitted + ); + ~MediaManager(); + + void setIsConnected(bool isConnected); + void receivePacket(const rtc::CopyOnWriteBuffer &packet); + void notifyPacketSent(const rtc::SentPacket &sentPacket); + void setIncomingVideoOutput(std::shared_ptr> sink); + void setOutgoingVideoOutput(std::shared_ptr> sink); + +protected: + std::function _packetEmitted; + +private: + rtc::Thread *_thread; + std::unique_ptr _eventLog; + std::unique_ptr _taskQueueFactory; + + SSRC _ssrcAudio; + SSRC _ssrcVideo; + + std::unique_ptr _mediaEngine; + std::unique_ptr _call; + webrtc::FieldTrialBasedConfig 
_fieldTrials; + webrtc::LocalAudioSinkAdapter _audioSource; + std::unique_ptr _audioChannel; + std::unique_ptr _videoChannel; + std::unique_ptr _videoBitrateAllocatorFactory; + rtc::scoped_refptr _nativeVideoSource; + std::unique_ptr _videoCapturer; + std::shared_ptr> _currentIncomingVideoSink; + std::shared_ptr> _currentOutgoingVideoSink; + + std::unique_ptr _audioNetworkInterface; + std::unique_ptr _videoNetworkInterface; +}; + +#ifdef TGVOIP_NAMESPACE +} +#endif + +#endif diff --git a/submodules/TgVoipWebrtc/Impl/NetworkManager.cpp b/submodules/TgVoipWebrtc/Impl/NetworkManager.cpp new file mode 100644 index 0000000000..65aaf9a24b --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/NetworkManager.cpp @@ -0,0 +1,334 @@ +#include "NetworkManager.h" + +#include "p2p/base/basic_packet_socket_factory.h" +#include "p2p/client/basic_port_allocator.h" +#include "p2p/base/p2p_transport_channel.h" +#include "p2p/base/basic_async_resolver_factory.h" +#include "api/packet_socket_factory.h" +#include "rtc_base/task_utils/to_queued_task.h" +#include "p2p/base/ice_credentials_iterator.h" +#include "api/jsep_ice_candidate.h" + +extern "C" { +#include +#include +#include +#include +#include +} + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +static void KDF2(unsigned char *encryptionKey, unsigned char *msgKey, size_t x, unsigned char *aesKey, unsigned char *aesIv) { + uint8_t sA[32], sB[32]; + uint8_t buf[16 + 36]; + memcpy(buf, msgKey, 16); + memcpy(buf + 16, encryptionKey + x, 36); + SHA256(buf, 16 + 36, sA); + memcpy(buf, encryptionKey + 40 + x, 36); + memcpy(buf + 36, msgKey, 16); + SHA256(buf, 36 + 16, sB); + memcpy(aesKey, sA, 8); + memcpy(aesKey + 8, sB + 8, 16); + memcpy(aesKey + 8 + 16, sA + 24, 8); + memcpy(aesIv, sB, 8); + memcpy(aesIv + 8, sA + 8, 16); + memcpy(aesIv + 8 + 16, sB + 24, 8); +} + +static void aesIgeEncrypt(uint8_t *in, uint8_t *out, size_t length, uint8_t *key, uint8_t *iv) { + AES_KEY akey; + AES_set_encrypt_key(key, 32*8, &akey); + AES_ige_encrypt(in, out, length, &akey, iv, AES_ENCRYPT); +} + +static void aesIgeDecrypt(uint8_t *in, uint8_t *out, size_t length, uint8_t *key, uint8_t *iv) { + AES_KEY akey; + AES_set_decrypt_key(key, 32*8, &akey); + AES_ige_encrypt(in, out, length, &akey, iv, AES_DECRYPT); +} + +static absl::optional decryptPacket(const rtc::CopyOnWriteBuffer &packet, const TgVoipEncryptionKey &encryptionKey) { + if (packet.size() < 16 + 16) { + return absl::nullopt; + } + unsigned char msgKey[16]; + memcpy(msgKey, packet.data(), 16); + + int x = encryptionKey.isOutgoing ? 
8 : 0; + + unsigned char aesKey[32]; + unsigned char aesIv[32]; + KDF2((unsigned char *)encryptionKey.value.data(), msgKey, x, aesKey, aesIv); + size_t decryptedSize = packet.size() - 16; + if (decryptedSize < 0 || decryptedSize > 128 * 1024) { + return absl::nullopt; + } + if (decryptedSize % 16 != 0) { + return absl::nullopt; + } + rtc::Buffer decryptionBuffer(decryptedSize); + aesIgeDecrypt(((uint8_t *)packet.data()) + 16, decryptionBuffer.begin(), decryptionBuffer.size(), aesKey, aesIv); + + rtc::ByteBufferWriter msgKeyData; + msgKeyData.WriteBytes((const char *)encryptionKey.value.data() + 88 + x, 32); + msgKeyData.WriteBytes((const char *)decryptionBuffer.data(), decryptionBuffer.size()); + unsigned char msgKeyLarge[32]; + SHA256((uint8_t *)msgKeyData.Data(), msgKeyData.Length(), msgKeyLarge); + + uint16_t innerSize; + memcpy(&innerSize, decryptionBuffer.data(), 2); + + unsigned char checkMsgKey[16]; + memcpy(checkMsgKey, msgKeyLarge + 8, 16); + + if (memcmp(checkMsgKey, msgKey, 16) != 0) { + return absl::nullopt; + } + + if (innerSize < 0 || innerSize > decryptionBuffer.size() - 2) { + return absl::nullopt; + } + + rtc::CopyOnWriteBuffer decryptedPacket; + decryptedPacket.AppendData((const char *)decryptionBuffer.data() + 2, innerSize); + return decryptedPacket; +} + +static absl::optional encryptPacket(const rtc::CopyOnWriteBuffer &packet, const TgVoipEncryptionKey &encryptionKey) { + if (packet.size() > UINT16_MAX) { + return absl::nullopt; + } + + rtc::ByteBufferWriter innerData; + uint16_t packetSize = (uint16_t)packet.size(); + innerData.WriteBytes((const char *)&packetSize, 2); + innerData.WriteBytes((const char *)packet.data(), packet.size()); + + size_t innerPadding = 16 - innerData.Length() % 16; + uint8_t paddingData[16]; + RAND_bytes(paddingData, (int)innerPadding); + innerData.WriteBytes((const char *)paddingData, innerPadding); + + if (innerData.Length() % 16 != 0) { + assert(false); + return absl::nullopt; + } + + int x = encryptionKey.isOutgoing ? 
0 : 8; + + rtc::ByteBufferWriter msgKeyData; + msgKeyData.WriteBytes((const char *)encryptionKey.value.data() + 88 + x, 32); + msgKeyData.WriteBytes(innerData.Data(), innerData.Length()); + unsigned char msgKeyLarge[32]; + SHA256((uint8_t *)msgKeyData.Data(), msgKeyData.Length(), msgKeyLarge); + + unsigned char msgKey[16]; + memcpy(msgKey, msgKeyLarge + 8, 16); + + unsigned char aesKey[32]; + unsigned char aesIv[32]; + KDF2((unsigned char *)encryptionKey.value.data(), msgKey, x, aesKey, aesIv); + + rtc::Buffer encryptedPacket; + encryptedPacket.AppendData((const char *)msgKey, 16); + + rtc::Buffer encryptionBuffer(innerData.Length()); + aesIgeEncrypt((uint8_t *)innerData.Data(), encryptionBuffer.begin(), innerData.Length(), aesKey, aesIv); + + encryptedPacket.AppendData(encryptionBuffer.begin(), encryptionBuffer.size()); + + /*rtc::CopyOnWriteBuffer testBuffer; + testBuffer.AppendData(encryptedPacket.data(), encryptedPacket.size()); + TgVoipEncryptionKey testKey; + testKey.value = encryptionKey.value; + testKey.isOutgoing = !encryptionKey.isOutgoing; + decryptPacket(testBuffer, testKey);*/ + + return encryptedPacket; +} + +NetworkManager::NetworkManager( + rtc::Thread *thread, + TgVoipEncryptionKey encryptionKey, + bool enableP2P, + std::function stateUpdated, + std::function packetReceived, + std::function &)> signalingDataEmitted +) : +_thread(thread), +_encryptionKey(encryptionKey), +_stateUpdated(stateUpdated), +_packetReceived(packetReceived), +_signalingDataEmitted(signalingDataEmitted) { + assert(_thread->IsCurrent()); + + _socketFactory.reset(new rtc::BasicPacketSocketFactory(_thread)); + + _networkManager = std::make_unique(); + _portAllocator.reset(new cricket::BasicPortAllocator(_networkManager.get(), _socketFactory.get(), nullptr, nullptr)); + + uint32_t flags = cricket::PORTALLOCATOR_DISABLE_TCP; + if (!enableP2P) { + flags |= cricket::PORTALLOCATOR_DISABLE_UDP; + flags |= cricket::PORTALLOCATOR_DISABLE_STUN; + } + //flags |= cricket::PORTALLOCATOR_DISABLE_UDP; + _portAllocator->set_flags(_portAllocator->flags() | flags); + _portAllocator->Initialize(); + + rtc::SocketAddress defaultStunAddress = rtc::SocketAddress("hlgkfjdrtjfykgulhijkljhulyo.uksouth.cloudapp.azure.com", 3478); + cricket::ServerAddresses stunServers; + stunServers.insert(defaultStunAddress); + std::vector turnServers; + turnServers.push_back(cricket::RelayServerConfig( + rtc::SocketAddress("hlgkfjdrtjfykgulhijkljhulyo.uksouth.cloudapp.azure.com", 3478), + "user", + "root", + cricket::PROTO_UDP + )); + _portAllocator->SetConfiguration(stunServers, turnServers, 2, webrtc::NO_PRUNE); + + _asyncResolverFactory = std::make_unique(); + _transportChannel.reset(new cricket::P2PTransportChannel("transport", 0, _portAllocator.get(), _asyncResolverFactory.get(), nullptr)); + + cricket::IceConfig iceConfig; + iceConfig.continual_gathering_policy = cricket::GATHER_CONTINUALLY; + _transportChannel->SetIceConfig(iceConfig); + + cricket::IceParameters localIceParameters( + "gcp3", + "zWDKozH8/3JWt8he3M/CMj5R", + false + ); + cricket::IceParameters remoteIceParameters( + "acp3", + "aWDKozH8/3JWt8he3M/CMj5R", + false + ); + + _transportChannel->SetIceParameters(_encryptionKey.isOutgoing ? localIceParameters : remoteIceParameters); + _transportChannel->SetIceRole(_encryptionKey.isOutgoing ? 
cricket::ICEROLE_CONTROLLING : cricket::ICEROLE_CONTROLLED);
+
+    _transportChannel->SignalCandidateGathered.connect(this, &NetworkManager::candidateGathered);
+    _transportChannel->SignalGatheringState.connect(this, &NetworkManager::candidateGatheringState);
+    _transportChannel->SignalIceTransportStateChanged.connect(this, &NetworkManager::transportStateChanged);
+    _transportChannel->SignalReadPacket.connect(this, &NetworkManager::transportPacketReceived);
+
+    _transportChannel->MaybeStartGathering();
+
+    _transportChannel->SetRemoteIceMode(cricket::ICEMODE_FULL);
+    _transportChannel->SetRemoteIceParameters((!_encryptionKey.isOutgoing) ? localIceParameters : remoteIceParameters);
+}
+
+NetworkManager::~NetworkManager() {
+    assert(_thread->IsCurrent());
+
+    _transportChannel.reset();
+    _asyncResolverFactory.reset();
+    _portAllocator.reset();
+    _networkManager.reset();
+    _socketFactory.reset();
+}
+
+void NetworkManager::receiveSignalingData(const std::vector<uint8_t> &data) {
+    rtc::ByteBufferReader reader((const char *)data.data(), data.size());
+    uint32_t candidateCount = 0;
+    if (!reader.ReadUInt32(&candidateCount)) {
+        return;
+    }
+    std::vector<std::string> candidates;
+    for (uint32_t i = 0; i < candidateCount; i++) {
+        uint32_t candidateLength = 0;
+        if (!reader.ReadUInt32(&candidateLength)) {
+            return;
+        }
+        std::string candidate;
+        if (!reader.ReadString(&candidate, candidateLength)) {
+            return;
+        }
+        candidates.push_back(candidate);
+    }
+
+    for (auto &serializedCandidate : candidates) {
+        webrtc::JsepIceCandidate parseCandidate("", 0);
+        if (parseCandidate.Initialize(serializedCandidate, nullptr)) {
+            auto parsedCandidate = parseCandidate.candidate();
+            _transportChannel->AddRemoteCandidate(parsedCandidate);
+        }
+    }
+}
+
+void NetworkManager::sendPacket(const rtc::CopyOnWriteBuffer &packet) {
+    auto encryptedPacket = encryptPacket(packet, _encryptionKey);
+    if (encryptedPacket.has_value()) {
+        rtc::PacketOptions packetOptions;
+        _transportChannel->SendPacket((const char *)encryptedPacket->data(), encryptedPacket->size(), packetOptions, 0);
+    }
+}
+
+void NetworkManager::candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate) {
+    assert(_thread->IsCurrent());
+    webrtc::JsepIceCandidate iceCandidate("", 0);
+    iceCandidate.SetCandidate(candidate);
+    std::string serializedCandidate;
+    if (!iceCandidate.ToString(&serializedCandidate)) {
+        return;
+    }
+    std::vector<std::string> candidates;
+    candidates.push_back(serializedCandidate);
+
+    rtc::ByteBufferWriter writer;
+    writer.WriteUInt32((uint32_t)candidates.size());
+    for (auto string : candidates) {
+        writer.WriteUInt32((uint32_t)string.size());
+        writer.WriteString(string);
+    }
+    std::vector<uint8_t> data;
+    data.resize(writer.Length());
+    memcpy(data.data(), writer.Data(), writer.Length());
+    _signalingDataEmitted(data);
+}
+
+void NetworkManager::candidateGatheringState(cricket::IceTransportInternal *transport) {
+    assert(_thread->IsCurrent());
+}
+
+void NetworkManager::transportStateChanged(cricket::IceTransportInternal *transport) {
+    assert(_thread->IsCurrent());
+
+    auto state = transport->GetIceTransportState();
+    bool isConnected = false;
+    switch (state) {
+        case webrtc::IceTransportState::kConnected:
+        case webrtc::IceTransportState::kCompleted:
+            isConnected = true;
+            break;
+        default:
+            break;
+    }
+    NetworkManager::State emitState;
+    emitState.isReadyToSendData = isConnected;
+    _stateUpdated(emitState);
+}
+
+void NetworkManager::transportReadyToSend(cricket::IceTransportInternal *transport) {
+    assert(_thread->IsCurrent());
+}
+
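// NOTE: wire framing implemented by encryptPacket()/decryptPacket() above, an
// MTProto-2.0-style construction (layout as read from the code):
//
//   ciphertext = msgKey (16 bytes) || AES-256-IGE(innerData, aesKey, aesIv)
//   innerData  = packetSize (uint16) || payload || random padding to a 16-byte multiple
//   msgKey     = bytes 8..23 of SHA256(key[88 + x .. 88 + x + 31] || innerData)
//   aesKey/aesIv = KDF2(encryptionKey, msgKey, x), with x = 0 or 8 by call direction
//
// The receiver recomputes msgKey from the decrypted innerData and drops the packet
// on mismatch, so msgKey doubles as the authenticator.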
+void NetworkManager::transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused) {
+    assert(_thread->IsCurrent());
+    rtc::CopyOnWriteBuffer packet;
+    packet.AppendData(bytes, size);
+
+    auto decryptedPacket = decryptPacket(packet, _encryptionKey);
+    if (decryptedPacket.has_value()) {
+        _packetReceived(decryptedPacket.value());
+    }
+}
+
+#ifdef TGVOIP_NAMESPACE
+}
+#endif
diff --git a/submodules/TgVoipWebrtc/Impl/NetworkManager.h b/submodules/TgVoipWebrtc/Impl/NetworkManager.h new file mode 100644 index 0000000000..8d804271ee --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/NetworkManager.h @@ -0,0 +1,78 @@
+#ifndef TGVOIP_WEBRTC_NETWORK_MANAGER_H
+#define TGVOIP_WEBRTC_NETWORK_MANAGER_H
+
+#include "rtc_base/thread.h"
+
+#include <functional>
+#include <memory>
+
+#include "rtc_base/copy_on_write_buffer.h"
+#include "api/candidate.h"
+#include "TgVoip.h"
+
+namespace rtc {
+class BasicPacketSocketFactory;
+class BasicNetworkManager;
+class PacketTransportInternal;
+}
+
+namespace cricket {
+class BasicPortAllocator;
+class P2PTransportChannel;
+class IceTransportInternal;
+}
+
+namespace webrtc {
+class BasicAsyncResolverFactory;
+}
+
+#ifdef TGVOIP_NAMESPACE
+namespace TGVOIP_NAMESPACE {
+#endif
+
+class NetworkManager: public sigslot::has_slots<> {
+public:
+    struct State {
+        bool isReadyToSendData;
+    };
+
+public:
+    NetworkManager(
+        rtc::Thread *thread,
+        TgVoipEncryptionKey encryptionKey,
+        bool enableP2P,
+        std::function<void (const NetworkManager::State &)> stateUpdated,
+        std::function<void (const rtc::CopyOnWriteBuffer &)> packetReceived,
+        std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
+    );
+    ~NetworkManager();
+
+    void receiveSignalingData(const std::vector<uint8_t> &data);
+    void sendPacket(const rtc::CopyOnWriteBuffer &packet);
+
+private:
+    rtc::Thread *_thread;
+    TgVoipEncryptionKey _encryptionKey;
+    std::function<void (const NetworkManager::State &)> _stateUpdated;
+    std::function<void (const rtc::CopyOnWriteBuffer &)> _packetReceived;
+    std::function<void (const std::vector<uint8_t> &)> _signalingDataEmitted;
+
+    std::unique_ptr<rtc::BasicPacketSocketFactory> _socketFactory;
+    std::unique_ptr<rtc::BasicNetworkManager> _networkManager;
+    std::unique_ptr<cricket::BasicPortAllocator> _portAllocator;
+    std::unique_ptr<webrtc::BasicAsyncResolverFactory> _asyncResolverFactory;
+    std::unique_ptr<cricket::P2PTransportChannel> _transportChannel;
+
+private:
+    void candidateGathered(cricket::IceTransportInternal *transport, const cricket::Candidate &candidate);
+    void candidateGatheringState(cricket::IceTransportInternal *transport);
+    void transportStateChanged(cricket::IceTransportInternal *transport);
+    void transportReadyToSend(cricket::IceTransportInternal *transport);
+    void transportPacketReceived(rtc::PacketTransportInternal *transport, const char *bytes, size_t size, const int64_t &timestamp, int unused);
+};
+
+#ifdef TGVOIP_NAMESPACE
+}
+#endif
+
+#endif
diff --git a/submodules/TgVoipWebrtc/Impl/TgVoip.h b/submodules/TgVoipWebrtc/Impl/TgVoip.h index 220975e71b..bf1c29ae03 100644 --- a/submodules/TgVoipWebrtc/Impl/TgVoip.h +++ b/submodules/TgVoipWebrtc/Impl/TgVoip.h @@ -1,14 +1,19 @@
 #ifndef __TGVOIP_H
 #define __TGVOIP_H
 
-#define TGVOIP_NAMESPACE tgvoip_webrtc
-
 #include
 #include
 #include
 #include
 
-#import "VideoMetalView.h"
+namespace rtc {
+template <typename VideoFrameT>
+class VideoSinkInterface;
+}
+
+namespace webrtc {
+class VideoFrame;
+}
 
 #ifdef TGVOIP_NAMESPACE
 namespace TGVOIP_NAMESPACE {
@@ -131,7 +136,9 @@
         std::vector<TgVoipEndpoint> const &endpoints,
         std::unique_ptr<TgVoipProxy> const &proxy,
         TgVoipNetworkType initialNetworkType,
-        TgVoipEncryptionKey const &encryptionKey
+        TgVoipEncryptionKey const &encryptionKey,
+        std::function<void (TgVoipState)> stateUpdated,
+        std::function<void (const std::vector<uint8_t> &)> signalingDataEmitted
     );
 
     virtual ~TgVoip();
@@ -141,19 +148,16 @@ public:
 
     virtual void setAudioOutputGainControlEnabled(bool enabled) = 0;
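// NOTE: the std::vector<uint8_t> blob exchanged through signalingDataEmitted /
// receiveSignalingData in this revised interface is the candidate batch built by
// NetworkManager::candidateGathered() above: a uint32 candidate count, then per
// candidate a uint32 length followed by that many bytes of SDP-serialized ICE
// candidate, written with rtc::ByteBufferWriter (network byte order).
//
// A minimal consumer sketch for the sink-based video output below; the
// FrameCounterSink name is hypothetical, only rtc::VideoSinkInterface is assumed:
//
//   class FrameCounterSink : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
//   public:
//       void OnFrame(const webrtc::VideoFrame &frame) override {
//           ++_frames; // a real sink would hand the frame to a renderer
//       }
//   private:
//       int _frames = 0;
//   };
//
//   auto sink = std::make_shared<FrameCounterSink>();
//   tgVoip->setIncomingVideoOutput(sink);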
diff --git a/submodules/TgVoipWebrtc/Impl/TgVoip.h b/submodules/TgVoipWebrtc/Impl/TgVoip.h
index 220975e71b..bf1c29ae03 100644
--- a/submodules/TgVoipWebrtc/Impl/TgVoip.h
+++ b/submodules/TgVoipWebrtc/Impl/TgVoip.h
@@ -1,14 +1,19 @@
 #ifndef __TGVOIP_H
 #define __TGVOIP_H
 
-#define TGVOIP_NAMESPACE tgvoip_webrtc
-
 #include <functional>
 #include <vector>
 #include <string>
 #include <memory>
 
-#import "VideoMetalView.h"
+namespace rtc {
+template <typename VideoFrameT>
+class VideoSinkInterface;
+}
+
+namespace webrtc {
+class VideoFrame;
+}
 
 #ifdef TGVOIP_NAMESPACE
 namespace TGVOIP_NAMESPACE {
@@ -131,7 +136,9 @@
         std::vector<TgVoipEndpoint> const &endpoints,
         std::unique_ptr<TgVoipProxy> const &proxy,
         TgVoipNetworkType initialNetworkType,
-        TgVoipEncryptionKey const &encryptionKey
+        TgVoipEncryptionKey const &encryptionKey,
+        std::function<void(TgVoipState)> stateUpdated,
+        std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
     );
 
     virtual ~TgVoip();
@@ -141,19 +148,16 @@ public:
     virtual void setAudioOutputGainControlEnabled(bool enabled) = 0;
     virtual void setEchoCancellationStrength(int strength) = 0;
 
-    virtual void AttachVideoView(VideoMetalView *videoView) = 0;
+    virtual void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
+    virtual void setOutgoingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) = 0;
 
     virtual std::string getLastError() = 0;
     virtual std::string getDebugInfo() = 0;
     virtual int64_t getPreferredRelayId() = 0;
     virtual TgVoipTrafficStats getTrafficStats() = 0;
     virtual TgVoipPersistentState getPersistentState() = 0;
-
-    virtual void setOnStateUpdated(std::function<void(TgVoipState)> onStateUpdated) = 0;
-    virtual void setOnSignalBarsUpdated(std::function<void(int)> onSignalBarsUpdated) = 0;
-    virtual void setOnCandidatesGathered(std::function<void(const std::vector<std::string> &)> onCandidatesGathered) = 0;
-    virtual void addRemoteCandidates(const std::vector<std::string> &candidates) = 0;
+    virtual void receiveSignalingData(const std::vector<uint8_t> &data) = 0;
 
     virtual TgVoipFinalState stop() = 0;
 };
diff --git a/submodules/TgVoipWebrtc/Impl/TgVoip.mm b/submodules/TgVoipWebrtc/Impl/TgVoip.mm
index 8ebefc7aa4..af578c03b3 100644
--- a/submodules/TgVoipWebrtc/Impl/TgVoip.mm
+++ b/submodules/TgVoipWebrtc/Impl/TgVoip.mm
@@ -2,11 +2,17 @@
 
 #include "TgVoip.h"
 
-#include "Controller.h"
+#include "rtc_base/logging.h"
+
+#include "Manager.h"
 
 #include
 #include
 
+#import <Foundation/Foundation.h>
+
+#include <sys/time.h>
+
 #ifndef TGVOIP_USE_CUSTOM_CRYPTO
 /*extern "C" {
 #include
@@ -75,10 +81,59 @@ CryptoFunctions Layer92::crypto={
 namespace TGVOIP_NAMESPACE {
 #endif
 
+class LogSinkImpl : public rtc::LogSink {
+public:
+    LogSinkImpl() {
+    }
+
+    virtual ~LogSinkImpl() {
+    }
+
+    virtual void OnLogMessage(const std::string &msg, rtc::LoggingSeverity severity, const char *tag) override {
+        OnLogMessage(std::string(tag) + ": " + msg);
+    }
+
+    virtual void OnLogMessage(const std::string &message, rtc::LoggingSeverity severity) override {
+        OnLogMessage(message);
+    }
+
+    virtual void OnLogMessage(const std::string &message) override {
+        time_t rawTime;
+        time(&rawTime);
+        struct tm timeinfo;
+        localtime_r(&rawTime, &timeinfo);
+
+        timeval curTime;
+        gettimeofday(&curTime, nullptr);
+        int32_t milliseconds = curTime.tv_usec / 1000;
+
+        _data << (timeinfo.tm_year + 1900);
+        _data << "-" << (timeinfo.tm_mon + 1);
+        _data << "-" << (timeinfo.tm_mday);
+        _data << " " << timeinfo.tm_hour;
+        _data << ":" << timeinfo.tm_min;
+        _data << ":" << timeinfo.tm_sec;
+        _data << ":" << milliseconds;
+        _data << " " << message;
+    }
+
+public:
+    std::ostringstream _data;
+};
+
+static rtc::Thread *makeManagerThread() {
+    static std::unique_ptr<rtc::Thread> value = rtc::Thread::Create();
+    value->SetName("WebRTC-Manager", nullptr);
+    value->Start();
+    return value.get();
+}
+
+static rtc::Thread *getManagerThread() {
+    static rtc::Thread *value = makeManagerThread();
+    return value;
+}
+
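Note: LogSinkImpl streams each timestamp field unpadded, so log lines come out like `2020-5-7 9:3:5:42`. If zero-padded timestamps are ever wanted, an equivalent snprintf-based formatter would be (a sketch, not part of the patch):

```cpp
#include <cstdio>
#include <ctime>
#include <string>
#include <sys/time.h>

// Padded variant of the LogSinkImpl timestamp: "YYYY-MM-DD HH:MM:SS:mmm".
std::string formatLogTimestamp() {
    time_t rawTime;
    time(&rawTime);
    struct tm timeinfo;
    localtime_r(&rawTime, &timeinfo);

    timeval curTime;
    gettimeofday(&curTime, nullptr);
    int milliseconds = (int)(curTime.tv_usec / 1000);

    char buffer[32];
    snprintf(buffer, sizeof(buffer), "%04d-%02d-%02d %02d:%02d:%02d:%03d",
             timeinfo.tm_year + 1900, timeinfo.tm_mon + 1, timeinfo.tm_mday,
             timeinfo.tm_hour, timeinfo.tm_min, timeinfo.tm_sec, milliseconds);
    return std::string(buffer);
}

int main() {
    std::printf("%s\n", formatLogTimestamp().c_str());
}
```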
 class TgVoipImpl : public TgVoip, public sigslot::has_slots<> {
-private:
-
-
 public:
     TgVoipImpl(
         std::vector<TgVoipEndpoint> const &endpoints,
@@ -86,88 +141,48 @@ public:
         std::unique_ptr<TgVoipProxy> const &proxy,
         TgVoipConfig const &config,
         TgVoipEncryptionKey const &encryptionKey,
-        TgVoipNetworkType initialNetworkType
-    ) {
-
+        TgVoipNetworkType initialNetworkType,
+        std::function<void(TgVoipState)> stateUpdated,
+        std::function<void(const std::vector<uint8_t> &)> signalingDataEmitted
+    ) :
+    _stateUpdated(stateUpdated),
+    _signalingDataEmitted(signalingDataEmitted) {
         static dispatch_once_t onceToken;
         dispatch_once(&onceToken, ^{
             rtc::LogMessage::LogToDebug(rtc::LS_INFO);
            rtc::LogMessage::SetLogToStderr(true);
         });
-
-        /*EncryptionKey encryptionKeyValue;
-        memcpy(encryptionKeyValue, encryptionKey.value.data(), 256);*/
-        controller_ = new Controller(encryptionKey.isOutgoing, 5, 3);
-
-        if (proxy != nullptr) {
-            controller_->SetProxy(rtc::ProxyType::PROXY_SOCKS5, rtc::SocketAddress(proxy->host, proxy->port),
-                proxy->login, proxy->password);
-        }
-
-        controller_->SignalNewState.connect(this, &TgVoipImpl::controllerStateCallback);
-        controller_->SignalCandidatesGathered.connect(this, &TgVoipImpl::candidatesGathered);
-        controller_->Start();
-
-        for (const auto &endpoint : endpoints) {
-            rtc::SocketAddress addr(endpoint.host.ipv4, endpoint.port);
-            Controller::EndpointType type;
-            switch (endpoint.type) {
-                case TgVoipEndpointType::UdpRelay:
-                    type = Controller::EndpointType::UDP;
-                    break;
-                case TgVoipEndpointType::Lan:
-                case TgVoipEndpointType::Inet:
-                    type = Controller::EndpointType::P2P;
-                    break;
-                case TgVoipEndpointType::TcpRelay:
-                    type = Controller::EndpointType::TCP;
-                    break;
-                default:
-                    type = Controller::EndpointType::UDP;
-                    break;
-            }
-            //controller_->AddEndpoint(addr, endpoint.peerTag, type);
-        }
-        /*rtc::SocketAddress addr("192.168.8.118", 7325);
-        unsigned char peerTag[16];
-        controller_->AddEndpoint(addr, peerTag, Controller::EndpointType::P2P);*/
-
-        setNetworkType(initialNetworkType);
-
-        switch (config.dataSaving) {
-            case TgVoipDataSaving::Mobile:
-                controller_->SetDataSaving(true);
-                break;
-            case TgVoipDataSaving::Always:
-                controller_->SetDataSaving(true);
-                break;
-            default:
-                controller_->SetDataSaving(false);
-                break;
-        }
+        rtc::LogMessage::AddLogToStream(&_logSink, rtc::LS_INFO);
+
+        bool enableP2P = config.enableP2P;
+
+        _manager.reset(new ThreadLocalObject<Manager>(getManagerThread(), [encryptionKey = encryptionKey, enableP2P = enableP2P, stateUpdated, signalingDataEmitted](){
+            return new Manager(
+                getManagerThread(),
+                encryptionKey,
+                enableP2P,
+                [stateUpdated](const TgVoipState &state) {
+                    stateUpdated(state);
+                },
+                [signalingDataEmitted](const std::vector<uint8_t> &data) {
+                    signalingDataEmitted(data);
+                }
+            );
+        }));
+        _manager->perform([](Manager *manager) {
+            manager->start();
+        });
     }
 
     ~TgVoipImpl() override {
-        stop();
-    }
-
-    void setOnStateUpdated(std::function<void(TgVoipState)> onStateUpdated) override {
-        std::lock_guard<std::mutex> lock(m_onStateUpdated);
-        onStateUpdated_ = onStateUpdated;
-    }
-
-    void setOnSignalBarsUpdated(std::function<void(int)> onSignalBarsUpdated) override {
-        std::lock_guard<std::mutex> lock(m_onSignalBarsUpdated);
-        onSignalBarsUpdated_ = onSignalBarsUpdated;
+        rtc::LogMessage::RemoveLogToStream(&_logSink);
     }
 
-    void setOnCandidatesGathered(std::function<void(const std::vector<std::string> &)> onCandidatesGathered) override {
-        onCandidatesGathered_ = onCandidatesGathered;
-    }
-
-    void addRemoteCandidates(const std::vector<std::string> &candidates) override {
-        controller_->AddRemoteCandidates(candidates);
-    }
+    void receiveSignalingData(const std::vector<uint8_t> &data) override {
+        _manager->perform([data](Manager *manager) {
+            manager->receiveSignalingData(data);
+        });
+    };
 
     void setNetworkType(TgVoipNetworkType networkType) override {
         /*message::NetworkType mappedType;
@@ -218,11 +233,19 @@
     }
 
     void setMuteMicrophone(bool muteMicrophone) override {
-        controller_->SetMute(muteMicrophone);
+        //controller_->SetMute(muteMicrophone);
     }
 
-    void AttachVideoView(VideoMetalView *videoView) override {
-        controller_->AttachVideoView([videoView getSink]);
+    void setIncomingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override {
+        _manager->perform([sink](Manager *manager) {
+            manager->setIncomingVideoOutput(sink);
+        });
+    }
+
+    void setOutgoingVideoOutput(std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink) override {
+        _manager->perform([sink](Manager *manager) {
+            manager->setOutgoingVideoOutput(sink);
+        });
     }
 
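Note: both video-output setters above hand the engine a shared_ptr to a sink whose OnFrame the video pipeline will call from its own thread; the shared ownership is what keeps the sink alive while lambdas holding it are still queued. A std-only model of that contract; `VideoFrame` and `VideoSink` are stand-ins for the webrtc/rtc types, not the patch's classes:

```cpp
#include <cstdio>
#include <memory>

// Stand-in for webrtc::VideoFrame.
struct VideoFrame {
    int width = 0;
    int height = 0;
};

// Stand-in for rtc::VideoSinkInterface<webrtc::VideoFrame>.
class VideoSink {
public:
    virtual ~VideoSink() = default;
    virtual void OnFrame(const VideoFrame &frame) = 0;
};

class SizeLoggingSink : public VideoSink {
public:
    void OnFrame(const VideoFrame &frame) override {
        // Only react when dimensions change, like the currentSize check in
        // VideoRendererAdapterImpl further down in this patch.
        if (frame.width != _width || frame.height != _height) {
            _width = frame.width;
            _height = frame.height;
            std::printf("size changed: %dx%d\n", _width, _height);
        }
    }

private:
    int _width = 0;
    int _height = 0;
};

int main() {
    std::shared_ptr<VideoSink> sink = std::make_shared<SizeLoggingSink>();
    sink->OnFrame({1280, 720});
    sink->OnFrame({1280, 720}); // same size: no output
    sink->OnFrame({720, 1280}); // rotated: logged
}
```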
-252,16 +275,14 @@ public: } TgVoipFinalState stop() override { - TgVoipFinalState finalState = { - }; - - delete controller_; - controller_ = nullptr; + TgVoipFinalState finalState; + finalState.debugLog = _logSink._data.str(); + finalState.isRatingSuggested = false; return finalState; } - void controllerStateCallback(Controller::State state) { + /*void controllerStateCallback(Controller::State state) { if (onStateUpdated_) { TgVoipState mappedState; switch (state) { @@ -287,44 +308,14 @@ public: onStateUpdated_(mappedState); } - } + }*/ + +private: + std::unique_ptr> _manager; + std::function _stateUpdated; + std::function &)> _signalingDataEmitted; - void candidatesGathered(const std::vector &candidates) { - onCandidatesGathered_(candidates); - } - -private: -#ifdef TGVOIP_USE_CALLBACK_AUDIO_IO - TgVoipAudioDataCallbacks audioCallbacks; - - void play(const int16_t *data, size_t size) { - if (!audioCallbacks.output) - return; - int16_t buf[size]; - memcpy(buf, data, size * 2); - audioCallbacks.output(buf, size); - } - - void record(int16_t *data, size_t size) { - if (audioCallbacks.input) - audioCallbacks.input(data, size); - } - - void preprocessed(const int16_t *data, size_t size) { - if (!audioCallbacks.preprocessed) - return; - int16_t buf[size]; - memcpy(buf, data, size * 2); - audioCallbacks.preprocessed(buf, size); - } -#endif - -private: - Controller *controller_; - std::function onStateUpdated_; - std::function onSignalBarsUpdated_; - std::function &)> onCandidatesGathered_; - std::mutex m_onStateUpdated, m_onSignalBarsUpdated; + LogSinkImpl _logSink; }; std::function globalLoggingFunction; @@ -368,7 +359,9 @@ TgVoip *TgVoip::makeInstance( std::vector const &endpoints, std::unique_ptr const &proxy, TgVoipNetworkType initialNetworkType, - TgVoipEncryptionKey const &encryptionKey + TgVoipEncryptionKey const &encryptionKey, + std::function stateUpdated, + std::function &)> signalingDataEmitted ) { return new TgVoipImpl( endpoints, @@ -376,7 +369,9 @@ TgVoip *TgVoip::makeInstance( proxy, config, encryptionKey, - initialNetworkType + initialNetworkType, + stateUpdated, + signalingDataEmitted ); } diff --git a/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.cpp b/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.cpp new file mode 100644 index 0000000000..8b13789179 --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.cpp @@ -0,0 +1 @@ + diff --git a/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.h b/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.h new file mode 100644 index 0000000000..fbaee62e2d --- /dev/null +++ b/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.h @@ -0,0 +1,55 @@ +#ifndef TGVOIP_WEBRTC_THREAD_LOCAL_OBJECT_H +#define TGVOIP_WEBRTC_THREAD_LOCAL_OBJECT_H + +#include "rtc_base/thread.h" + +#include +#include + +#ifdef TGVOIP_NAMESPACE +namespace TGVOIP_NAMESPACE { +#endif + +template +class ThreadLocalObject { +private: + template + class ValueHolder { + public: + std::shared_ptr _value; + }; + +public: + ThreadLocalObject(rtc::Thread *thread, std::function generator) : + _thread(thread), + _valueHolder(new ThreadLocalObject::ValueHolder()) { + assert(_thread != nullptr); + _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder, generator](){ + valueHolder->_value.reset(generator()); + }); + } + + ~ThreadLocalObject() { + _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder](){ + valueHolder->_value.reset(); + }); + } + + template + void perform(FunctorT&& functor) { + _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder, f = 
diff --git a/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.h b/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.h
new file mode 100644
index 0000000000..fbaee62e2d
--- /dev/null
+++ b/submodules/TgVoipWebrtc/Impl/ThreadLocalObject.h
@@ -0,0 +1,55 @@
+#ifndef TGVOIP_WEBRTC_THREAD_LOCAL_OBJECT_H
+#define TGVOIP_WEBRTC_THREAD_LOCAL_OBJECT_H
+
+#include "rtc_base/thread.h"
+
+#include <functional>
+#include <memory>
+
+#ifdef TGVOIP_NAMESPACE
+namespace TGVOIP_NAMESPACE {
+#endif
+
+template <class T>
+class ThreadLocalObject {
+private:
+    template <class TV>
+    class ValueHolder {
+    public:
+        std::shared_ptr<TV> _value;
+    };
+
+public:
+    ThreadLocalObject(rtc::Thread *thread, std::function<T *()> generator) :
+    _thread(thread),
+    _valueHolder(new ThreadLocalObject::ValueHolder<T>()) {
+        assert(_thread != nullptr);
+        _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder, generator](){
+            valueHolder->_value.reset(generator());
+        });
+    }
+
+    ~ThreadLocalObject() {
+        _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder](){
+            valueHolder->_value.reset();
+        });
+    }
+
+    template <class FunctorT>
+    void perform(FunctorT&& functor) {
+        _thread->PostTask(RTC_FROM_HERE, [valueHolder = _valueHolder, f = std::forward<FunctorT>(functor)](){
+            assert(valueHolder->_value != nullptr);
+            f(valueHolder->_value.get());
+        });
+    }
+
+private:
+    rtc::Thread *_thread;
+    std::shared_ptr<ThreadLocalObject::ValueHolder<T>> _valueHolder;
+};
+
+#ifdef TGVOIP_NAMESPACE
+}
+#endif
+
+#endif
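Note: ThreadLocalObject above is the thread-confinement primitive the whole patch leans on: the value is created, used through perform(), and destroyed only on its owning rtc::Thread, while the shared ValueHolder keeps queued tasks safe during teardown. A std-only model of the same idea, runnable without WebRTC; `TaskThread` stands in for rtc::Thread, and the names are mine:

```cpp
#include <cassert>
#include <condition_variable>
#include <functional>
#include <memory>
#include <mutex>
#include <queue>
#include <thread>

// Single worker thread with a FIFO task queue, standing in for
// rtc::Thread::PostTask. An empty task is the stop sentinel.
class TaskThread {
public:
    TaskThread() : _worker([this] { run(); }) {}
    ~TaskThread() {
        post(nullptr);
        _worker.join();
    }
    void post(std::function<void()> task) {
        std::lock_guard<std::mutex> lock(_mutex);
        _tasks.push(std::move(task));
        _condition.notify_one();
    }

private:
    void run() {
        while (true) {
            std::function<void()> task;
            {
                std::unique_lock<std::mutex> lock(_mutex);
                _condition.wait(lock, [this] { return !_tasks.empty(); });
                task = std::move(_tasks.front());
                _tasks.pop();
            }
            if (!task) return; // sentinel
            task();
        }
    }
    std::mutex _mutex;
    std::condition_variable _condition;
    std::queue<std::function<void()>> _tasks;
    std::thread _worker; // declared last so the queue exists before run() starts
};

template <class T>
class ThreadLocalObjectModel {
public:
    ThreadLocalObjectModel(TaskThread *thread, std::function<T *()> generator)
        : _thread(thread), _holder(std::make_shared<std::shared_ptr<T>>()) {
        _thread->post([holder = _holder, generator] { holder->reset(generator()); });
    }
    ~ThreadLocalObjectModel() {
        // Destruction of the owned value is also marshaled to the owning
        // thread; the shared holder keeps in-flight tasks valid meanwhile.
        _thread->post([holder = _holder] { holder->reset(); });
    }
    template <class F>
    void perform(F &&f) {
        _thread->post([holder = _holder, f = std::forward<F>(f)] {
            assert(*holder != nullptr);
            f(holder->get());
        });
    }

private:
    TaskThread *_thread;
    std::shared_ptr<std::shared_ptr<T>> _holder;
};

struct Manager { void start() {} };

int main() {
    TaskThread thread;
    ThreadLocalObjectModel<Manager> manager(&thread, [] { return new Manager(); });
    manager.perform([](Manager *m) { m->start(); });
    // manager is destroyed before thread, so its reset task still has a
    // live queue to land on - the same ordering the patch relies on.
}
```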
diff --git a/submodules/TgVoipWebrtc/Impl/VideoMetalView.h b/submodules/TgVoipWebrtc/Impl/VideoMetalView.h
index 3425ec74f8..eb332b65e9 100644
--- a/submodules/TgVoipWebrtc/Impl/VideoMetalView.h
+++ b/submodules/TgVoipWebrtc/Impl/VideoMetalView.h
@@ -6,6 +6,8 @@
 
 #import "api/media_stream_interface.h"
 
+#include <memory>
+
 @class RTCVideoFrame;
 
 @interface VideoMetalView : UIView
@@ -17,9 +19,7 @@
 - (void)setSize:(CGSize)size;
 - (void)renderFrame:(nullable RTCVideoFrame *)frame;
 
-- (void)addToTrack:(rtc::scoped_refptr<webrtc::VideoTrackInterface>)track;
-
-- (rtc::VideoSinkInterface<webrtc::VideoFrame> *)getSink;
+- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink;
 
 @end
diff --git a/submodules/TgVoipWebrtc/Impl/VideoMetalView.mm b/submodules/TgVoipWebrtc/Impl/VideoMetalView.mm
index 32616ffb67..125fe45d4d 100644
--- a/submodules/TgVoipWebrtc/Impl/VideoMetalView.mm
+++ b/submodules/TgVoipWebrtc/Impl/VideoMetalView.mm
@@ -23,26 +23,22 @@
 
 class VideoRendererAdapterImpl : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
 public:
-    VideoRendererAdapterImpl(VideoMetalView *adapter) {
-        adapter_ = adapter;
-        size_ = CGSizeZero;
+    VideoRendererAdapterImpl(void (^frameReceived)(CGSize, RTCVideoFrame *)) {
+        _frameReceived = [frameReceived copy];
     }
 
     void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
         RTCVideoFrame* videoFrame = NativeToObjCVideoFrame(nativeVideoFrame);
 
-        CGSize current_size = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);
+        CGSize currentSize = (videoFrame.rotation % 180 == 0) ? CGSizeMake(videoFrame.width, videoFrame.height) : CGSizeMake(videoFrame.height, videoFrame.width);
 
-        if (!CGSizeEqualToSize(size_, current_size)) {
-            size_ = current_size;
-            [adapter_ setSize:size_];
+        if (_frameReceived) {
+            _frameReceived(currentSize, videoFrame);
         }
-        [adapter_ renderFrame:videoFrame];
     }
 
 private:
-    __weak VideoMetalView *adapter_;
-    CGSize size_;
+    void (^_frameReceived)(CGSize, RTCVideoFrame *);
 };
 
 @interface VideoMetalView () {
@@ -54,7 +50,8 @@ private:
     CGSize _videoFrameSize;
     int64_t _lastFrameTimeNs;
 
-    std::unique_ptr<VideoRendererAdapterImpl> _sink;
+    CGSize _currentSize;
+    std::shared_ptr<VideoRendererAdapterImpl> _sink;
 }
 
 @end
@@ -66,7 +63,23 @@ private:
     if (self) {
         [self configure];
 
-        _sink.reset(new VideoRendererAdapterImpl(self));
+        _currentSize = CGSizeZero;
+
+        __weak VideoMetalView *weakSelf = self;
+        _sink.reset(new VideoRendererAdapterImpl(^(CGSize size, RTCVideoFrame *videoFrame) {
+            dispatch_async(dispatch_get_main_queue(), ^{
+                __strong VideoMetalView *strongSelf = weakSelf;
+                if (strongSelf == nil) {
+                    return;
+                }
+                if (!CGSizeEqualToSize(size, strongSelf->_currentSize)) {
+                    strongSelf->_currentSize = size;
+                    [strongSelf setSize:size];
+                }
+
+                [strongSelf renderFrame:videoFrame];
+            });
+        }));
     }
     return self;
 }
@@ -239,23 +252,19 @@ private:
 #pragma mark - RTCVideoRenderer
 
 - (void)setSize:(CGSize)size {
-    __weak VideoMetalView *weakSelf = self;
-    dispatch_async(dispatch_get_main_queue(), ^{
-        __strong VideoMetalView *strongSelf = weakSelf;
-        if (strongSelf == nil) {
-            return;
-        }
-
-        strongSelf->_videoFrameSize = size;
-        CGSize drawableSize = [strongSelf drawableSize];
-
-        strongSelf->_metalView.drawableSize = drawableSize;
-        [strongSelf setNeedsLayout];
-        //[strongSelf.delegate videoView:self didChangeVideoSize:size];
-    });
+    assert([NSThread isMainThread]);
+
+    _videoFrameSize = size;
+    CGSize drawableSize = [self drawableSize];
+
+    _metalView.drawableSize = drawableSize;
+    [self setNeedsLayout];
+    //[strongSelf.delegate videoView:self didChangeVideoSize:size];
 }
 
 - (void)renderFrame:(nullable RTCVideoFrame *)frame {
+    assert([NSThread isMainThread]);
+
     if (!self.isEnabled) {
         return;
     }
@@ -267,12 +276,10 @@ private:
     _videoFrame = frame;
 }
 
-- (void)addToTrack:(rtc::scoped_refptr<webrtc::VideoTrackInterface>)track {
-    track->AddOrUpdateSink(_sink.get(), rtc::VideoSinkWants());
-}
-
-- (rtc::VideoSinkInterface<webrtc::VideoFrame> *)getSink {
-    return _sink.get();
+- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)getSink {
+    assert([NSThread isMainThread]);
+
+    return _sink;
 }
 
 @end
diff --git a/submodules/TgVoipWebrtc/Impl/VideoRendererAdapter.h b/submodules/TgVoipWebrtc/Impl/VideoRendererAdapter.h
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/submodules/TgVoipWebrtc/Impl/VideoRendererAdapter.mm b/submodules/TgVoipWebrtc/Impl/VideoRendererAdapter.mm
deleted file mode 100644
index e69de29bb2..0000000000
diff --git a/submodules/TgVoipWebrtc/PublicHeaders/TgVoip/OngoingCallThreadLocalContext.h b/submodules/TgVoipWebrtc/PublicHeaders/TgVoip/OngoingCallThreadLocalContext.h
index 6ecd994552..f90f9b961c 100644
--- a/submodules/TgVoipWebrtc/PublicHeaders/TgVoip/OngoingCallThreadLocalContext.h
+++ b/submodules/TgVoipWebrtc/PublicHeaders/TgVoip/OngoingCallThreadLocalContext.h
@@ -76,7 +76,8 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
 - (void)setIsMuted:(bool)isMuted;
 - (void)setNetworkType:(OngoingCallNetworkTypeWebrtc)networkType;
 
-- (void)getRemoteCameraView:(void (^_Nonnull)(UIView * _Nullable))completion;
+- (void)makeIncomingVideoView:(void (^_Nonnull)(UIView * _Nullable))completion;
+- (void)makeOutgoingVideoView:(void (^_Nonnull)(UIView * _Nullable))completion;
 - (void)addSignalingData:(NSData * _Nonnull)data;
 
 @end
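Note: VideoMetalView now receives frames through a block that captures the view weakly and hops to the main queue, instead of the sink holding a __weak back-pointer and the view re-dispatching in setSize:. Modeled below with weak_ptr in place of __weak; `runOnMainQueue` is a synchronous stand-in for dispatch_async(dispatch_get_main_queue(), ...), and all names are mine:

```cpp
#include <cstdio>
#include <functional>
#include <memory>

struct Frame { int width, height; };

// Stand-in: a real implementation would enqueue onto the UI thread.
void runOnMainQueue(std::function<void()> f) { f(); }

class View : public std::enable_shared_from_this<View> {
public:
    // The returned callback is what the sink holds; it owns no strong
    // reference to the view, so a destroyed view just drops frames.
    std::function<void(Frame)> makeSinkCallback() {
        std::weak_ptr<View> weakSelf = shared_from_this();
        return [weakSelf](Frame frame) {
            runOnMainQueue([weakSelf, frame] {
                if (auto strongSelf = weakSelf.lock()) {
                    strongSelf->render(frame);
                }
            });
        };
    }
    void render(Frame frame) { std::printf("%dx%d\n", frame.width, frame.height); }
};

int main() {
    auto view = std::make_shared<View>();
    auto sink = view->makeSinkCallback();
    sink({640, 480}); // rendered
    view.reset();
    sink({640, 480}); // view gone: safely dropped
}
```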
diff --git a/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm b/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
index f8fd37445e..08859c9538 100644
--- a/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
+++ b/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
@@ -1,6 +1,7 @@
 #import <TgVoip/OngoingCallThreadLocalContext.h>
 
 #import "TgVoip.h"
+#import "VideoMetalView.h"
 
 using namespace TGVOIP_NAMESPACE;
 
@@ -189,41 +190,35 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
             .isOutgoing = isOutgoing,
         };
 
+        __weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
         _tgVoip = TgVoip::makeInstance(
             config,
             { derivedStateValue },
             endpoints,
             proxyValue,
             callControllerNetworkTypeForType(networkType),
-            encryptionKey
+            encryptionKey,
+            [weakSelf, queue](TgVoipState state) {
+                [queue dispatch:^{
+                    __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
+                    if (strongSelf) {
+                        [strongSelf controllerStateChanged:state];
+                    }
+                }];
+            },
+            [weakSelf, queue](const std::vector<uint8_t> &data) {
+                NSData *mappedData = [[NSData alloc] initWithBytes:data.data() length:data.size()];
+                [queue dispatch:^{
+                    __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
+                    if (strongSelf) {
+                        [strongSelf signalingDataEmitted:mappedData];
+                    }
+                }];
+            }
         );
 
         _state = OngoingCallStateInitializing;
         _signalBars = -1;
-
-        __weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
-        _tgVoip->setOnStateUpdated([weakSelf](TgVoipState state) {
-            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
-            if (strongSelf) {
-                [strongSelf controllerStateChanged:state];
-            }
-        });
-        _tgVoip->setOnSignalBarsUpdated([weakSelf](int signalBars) {
-            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
-            if (strongSelf) {
-                [strongSelf signalBarsChanged:signalBars];
-            }
-        });
-        _tgVoip->setOnCandidatesGathered([weakSelf](const std::vector<std::string> &candidates) {
-            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
-            if (strongSelf) {
-                NSMutableArray *mappedCandidates = [[NSMutableArray alloc] init];
-                for (auto &candidate : candidates) {
-                    [mappedCandidates addObject:[[NSString alloc] initWithCString:candidate.c_str() encoding:NSUTF8StringEncoding]];
-                }
-                [strongSelf candidatesGathered:mappedCandidates];
-            }
-        });
     }
     return self;
 }
@@ -320,27 +315,18 @@
     }
 }
 
-- (void)candidatesGathered:(NSArray *)candidates {
+- (void)signalingDataEmitted:(NSData *)data {
     if (_sendSignalingData) {
-        NSData *data = [NSKeyedArchiver archivedDataWithRootObject:@{
-            @"type": @"candidates",
-            @"data": candidates
-        }];
         _sendSignalingData(data);
     }
 }
 
 - (void)addSignalingData:(NSData *)data {
-    NSDictionary *dict = [NSKeyedUnarchiver unarchiveObjectWithData:data];
-    NSString *type = dict[@"type"];
-    if ([type isEqualToString:@"candidates"]) {
-        if (_tgVoip) {
-            std::vector<std::string> candidates;
-            for (NSString *string in dict[@"data"]) {
-                candidates.push_back([string UTF8String]);
-            }
-            _tgVoip->addRemoteCandidates(candidates);
-        }
+    if (_tgVoip) {
+        std::vector<uint8_t> mappedData;
+        mappedData.resize(data.length);
+        [data getBytes:mappedData.data() length:data.length];
+        _tgVoip->receiveSignalingData(mappedData);
     }
 }
 
@@ -359,17 +345,38 @@
     }
 }
 
-- (void)getRemoteCameraView:(void (^_Nonnull)(UIView * _Nullable))completion {
+- (void)makeIncomingVideoView:(void (^_Nonnull)(UIView * _Nullable))completion {
     if (_tgVoip) {
+        __weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
         dispatch_async(dispatch_get_main_queue(), ^{
             VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectZero];
             remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
 
-            _tgVoip->AttachVideoView(remoteRenderer);
+            std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
+            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
+            if (strongSelf) {
+                strongSelf->_tgVoip->setIncomingVideoOutput(sink);
+            }
 
-            dispatch_async(dispatch_get_main_queue(), ^{
-                completion(remoteRenderer);
-            });
+            completion(remoteRenderer);
         });
     }
 }
+
+- (void)makeOutgoingVideoView:(void (^_Nonnull)(UIView * _Nullable))completion {
+    if (_tgVoip) {
+        __weak OngoingCallThreadLocalContextWebrtc *weakSelf = self;
+        dispatch_async(dispatch_get_main_queue(), ^{
+            VideoMetalView *remoteRenderer = [[VideoMetalView alloc] initWithFrame:CGRectZero];
+            remoteRenderer.videoContentMode = UIViewContentModeScaleAspectFill;
+
+            std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>> sink = [remoteRenderer getSink];
+            __strong OngoingCallThreadLocalContextWebrtc *strongSelf = weakSelf;
+            if (strongSelf) {
+                strongSelf->_tgVoip->setOutgoingVideoOutput(sink);
+            }
+
+            completion(remoteRenderer);
+        });
+    }
+}
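Note the order of operations in the signaling callback above: the `std::vector<uint8_t>` payload is copied into an owned NSData before the block is queued, because the vector reference is only valid for the duration of the engine's callback. The same discipline in std-only form; `SerialQueue` here just invokes immediately and stands in for [queue dispatch:]:

```cpp
#include <cstdint>
#include <functional>
#include <utility>
#include <vector>

// Stand-in for the context's serial queue: a real one would run `task`
// later, after the caller's stack (and its buffers) are gone.
struct SerialQueue {
    void dispatch(std::function<void()> task) { task(); }
};

// The engine invokes this with a reference that is only valid during the
// call, mirroring the `const std::vector<uint8_t> &data` callback above.
void onSignalingData(SerialQueue &queue, const std::vector<uint8_t> &data) {
    std::vector<uint8_t> owned(data); // copy before crossing the queue boundary
    queue.dispatch([owned = std::move(owned)] {
        // Safe: `owned` is captured by value and outlives the caller's buffer.
    });
}

int main() {
    SerialQueue queue;
    std::vector<uint8_t> bytes = {1, 2, 3};
    onSignalingData(queue, bytes);
}
```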
diff --git a/third-party/webrtc/BUCK b/third-party/webrtc/BUCK
index 3d8708fd0c..438e4ab424 100644
--- a/third-party/webrtc/BUCK
+++ b/third-party/webrtc/BUCK
@@ -9,7 +9,6 @@ genrule(
    srcs = [
        "build-webrtc-buck.sh",
        "webrtc-ios",
-        "patch.sh",
    ],
    bash =
    """
@@ -33,8 +32,6 @@ genrule(
    rm -rf "$BUILD_DIR/depot_tools"
    cp -R "$DEPOT_TOOLS_PATH" "$BUILD_DIR/"
 
-    cp "$SRCDIR/patch.sh" "$BUILD_DIR/"
-
    rm -rf "$BUILD_DIR/openssl"
    cp -R "$(location //submodules/openssl:openssl_build_merged)" "$BUILD_DIR/openssl/"
    cp -R "$(location //submodules/openssl:openssl_libssl_merged)" "$BUILD_DIR/libssl/"
diff --git a/third-party/webrtc/BUILD b/third-party/webrtc/BUILD
index 03119e6e72..7553b55933 100644
--- a/third-party/webrtc/BUILD
+++ b/third-party/webrtc/BUILD
@@ -15,7 +15,6 @@ genrule(
    name = "webrtc_build",
    srcs = [
        "build-webrtc-bazel.sh",
-        "patch.sh",
        ":webrtc_sources",
        "//third-party:depot_tools_sources",
        "//submodules/openssl:openssl_include",
@@ -36,7 +35,7 @@ genrule(
        echo "Unsupported architecture $(TARGET_CPU)"
    fi
    BUILD_DIR="$(RULEDIR)/$$BUILD_ARCH"
-    #rm -rf "$$BUILD_DIR"
+    rm -rf "$$BUILD_DIR"
    mkdir -p "$$BUILD_DIR"
 
    SOURCE_PATH="third-party/webrtc/webrtc-ios/src"
@@ -62,9 +61,6 @@ genrule(
    rm -f "$$BUILD_DIR/build-webrtc-bazel.sh"
    cp $(location build-webrtc-bazel.sh) "$$BUILD_DIR/"
 
-    rm -f "$$BUILD_DIR/patch.sh"
-    cp $(location patch.sh) "$$BUILD_DIR/"
-
    sh $$BUILD_DIR/build-webrtc-bazel.sh "$$BUILD_DIR" $$BUILD_ARCH
    """ + "\n".join([
        "cp -f $$BUILD_DIR/src/out/$$OUT_DIR/obj/sdk/libframework_objc_static.a $(location {lib})".format(lib=lib) for lib in webrtc_libs
diff --git a/third-party/webrtc/build-webrtc-bazel.sh b/third-party/webrtc/build-webrtc-bazel.sh
index 0e076ccb1d..93bc477e08 100755
--- a/third-party/webrtc/build-webrtc-bazel.sh
+++ b/third-party/webrtc/build-webrtc-bazel.sh
@@ -16,14 +16,12 @@ cp -R "$BUILD_DIR/openssl" "$BUILD_DIR/src/"
 
 pushd "$BUILD_DIR/src"
 
-sh "../patch.sh" || true
-
 OUT_DIR="ios"
 if [ "$ARCH" == "x64" ]; then
   OUT_DIR="ios_sim"
 fi
 
-gn gen out/$OUT_DIR --args="use_xcode_clang=true "" target_cpu=\"$ARCH\""' target_os="ios" is_debug=false is_component_build=false rtc_include_tests=false use_rtti=true rtc_use_x11=false use_custom_libcxx=false use_custom_libcxx_for_host=false rtc_build_ssl=false rtc_build_examples=false rtc_build_tools=false ios_deployment_target="9.0" ios_enable_code_signing=false is_unsafe_developer_build=false rtc_enable_protobuf=false rtc_include_builtin_video_codecs=true rtc_build_libvpx=true rtc_libvpx_build_vp9=true rtc_use_gtk=false rtc_use_metal_rendering=true rtc_ssl_root="//openssl"'
+buildtools/mac/gn gen out/$OUT_DIR --args="use_xcode_clang=true "" target_cpu=\"$ARCH\""' target_os="ios" is_debug=false is_component_build=false rtc_include_tests=false use_rtti=true rtc_use_x11=false use_custom_libcxx=false use_custom_libcxx_for_host=false rtc_build_ssl=false rtc_build_examples=false rtc_build_tools=false ios_deployment_target="9.0" ios_enable_code_signing=false is_unsafe_developer_build=false rtc_enable_protobuf=false rtc_include_builtin_video_codecs=true rtc_build_libvpx=true rtc_libvpx_build_vp9=true rtc_use_gtk=false rtc_use_metal_rendering=true'
 
 ninja -C out/$OUT_DIR framework_objc_static
 
 popd
diff --git a/third-party/webrtc/build-webrtc-buck.sh b/third-party/webrtc/build-webrtc-buck.sh
index 8859551f05..214fa403ea 100755
--- a/third-party/webrtc/build-webrtc-buck.sh
+++ b/third-party/webrtc/build-webrtc-buck.sh
@@ -20,14 +20,12 @@ pushd "$BUILD_DIR/webrtc-ios/src"
 mv openssl/lib/libcrypto.a openssl/
 mv libssl/lib/libssl.a openssl/
 
-sh "../../patch.sh" || true
-
 OUT_DIR="ios"
 if [ "$ARCH" == "x64" ]; then
   OUT_DIR="ios_sim"
 fi
 
-buildtools/mac/gn gen out/$OUT_DIR --args="use_xcode_clang=true "" target_cpu=\"$ARCH\""' target_os="ios" is_debug=false is_component_build=false rtc_include_tests=false use_rtti=true rtc_use_x11=false use_custom_libcxx=false use_custom_libcxx_for_host=false rtc_build_ssl=false rtc_build_examples=false rtc_build_tools=false ios_deployment_target="9.0" ios_enable_code_signing=false is_unsafe_developer_build=false rtc_enable_protobuf=false rtc_include_builtin_video_codecs=true rtc_build_libvpx=true rtc_libvpx_build_vp9=true rtc_use_gtk=false rtc_use_metal_rendering=true rtc_ssl_root="//openssl"'
+buildtools/mac/gn gen out/$OUT_DIR --args="use_xcode_clang=true "" target_cpu=\"$ARCH\""' target_os="ios" is_debug=false is_component_build=false rtc_include_tests=false use_rtti=true rtc_use_x11=false use_custom_libcxx=false use_custom_libcxx_for_host=false rtc_build_ssl=false rtc_build_examples=false rtc_build_tools=false ios_deployment_target="9.0" ios_enable_code_signing=false is_unsafe_developer_build=false rtc_enable_protobuf=false rtc_include_builtin_video_codecs=true rtc_build_libvpx=true rtc_libvpx_build_vp9=true rtc_use_gtk=false rtc_use_metal_rendering=true'
 
 ninja -C out/$OUT_DIR framework_objc_static
 
 popd
diff --git a/third-party/webrtc/patch.sh b/third-party/webrtc/patch.sh
deleted file mode 100644
index 8b6e418e81..0000000000
--- a/third-party/webrtc/patch.sh
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh
-
-PATCH=$(cat <<-END
---- a/rtc_base/BUILD.gn
-+++ b/rtc_base/BUILD.gn
-@@ -23,7 +23,11 @@ if (!rtc_build_ssl) {
-   config("external_ssl_library") {
-     assert(rtc_ssl_root != "",
-            "You must specify rtc_ssl_root when rtc_build_ssl==0.")
--    include_dirs = [ rtc_ssl_root ]
-+    include_dirs = [ "\$rtc_ssl_root/include" ]
-+    libs = [
-+      "\$rtc_ssl_root/libssl.a",
-+      "\$rtc_ssl_root/libcrypto.a"
-+    ]
-   }
- }
-
---- a/third_party/usrsctp/BUILD.gn
-+++ b/third_party/usrsctp/BUILD.gn
-@@ -3,6 +3,7 @@
- # found in the LICENSE file.
-
- import("//build/toolchain/toolchain.gni")
-+import("//webrtc.gni")
-
- config("usrsctp_config") {
-   include_dirs = [
-@@ -140,7 +141,9 @@ static_library("usrsctp") {
-   if (is_fuchsia) {
-     defines += [ "__Userspace_os_Fuchsia" ]
-   }
--  deps = [
--    "//third_party/boringssl",
--  ]
-+  if (rtc_build_ssl) {
-+    deps += [ "//third_party/boringssl" ]
-+  } else {
-+    configs += [ "//rtc_base:external_ssl_library" ]
-+  }
- }
-
---- a/third_party/libsrtp/BUILD.gn
-+++ b/third_party/libsrtp/BUILD.gn
-@@ -3,6 +3,7 @@
- # found in the LICENSE file.
-
- import("//testing/test.gni")
-+import("//webrtc.gni")
-
- declare_args() {
-   # Tests may not be appropriate for some build environments, e.g. Windows.
-@@ -114,9 +115,11 @@ static_library("libsrtp") {
-     "srtp/ekt.c",
-     "srtp/srtp.c",
-   ]
--  public_deps = [
--    "//third_party/boringssl:boringssl",
--  ]
-+  if (rtc_build_ssl) {
-+    public_deps = [ "//third_party/boringssl" ]
-+  } else {
-+    configs += [ "//rtc_base:external_ssl_library" ]
-+  }
- }
-
- if (build_libsrtp_tests) {
-END
-)
-
-echo "$PATCH" | patch -p1
diff --git a/third-party/webrtc/webrtc-ios b/third-party/webrtc/webrtc-ios
index 625c777c8a..cf2c8a8364 160000
--- a/third-party/webrtc/webrtc-ios
+++ b/third-party/webrtc/webrtc-ios
@@ -1 +1 @@
-Subproject commit 625c777c8ad260cbc4bf9a6409d5b4382d92914b
+Subproject commit cf2c8a8364b4cfda7ea9eb448671033351851130