diff --git a/submodules/AnimationUI/Sources/AnimationNode.swift b/submodules/AnimationUI/Sources/AnimationNode.swift
index 99e485f8ce..8fc72d44c1 100644
--- a/submodules/AnimationUI/Sources/AnimationNode.swift
+++ b/submodules/AnimationUI/Sources/AnimationNode.swift
@@ -206,7 +206,7 @@ public final class AnimationNode: ASDisplayNode {
     }
     
     public func preferredSize() -> CGSize? {
-        if let animationView = animationView(), let animation = animationView.animation {
+        if let animationView = self.animationView(), let animation = animationView.animation {
             return CGSize(width: animation.size.width * self.scale, height: animation.size.height * self.scale)
         } else {
             return nil
diff --git a/submodules/Camera/Sources/Camera.swift b/submodules/Camera/Sources/Camera.swift
index eef54e9abb..8cb9ac59e3 100644
--- a/submodules/Camera/Sources/Camera.swift
+++ b/submodules/Camera/Sources/Camera.swift
@@ -604,6 +604,10 @@ private final class CameraContext {
         return self.audioLevelPipe.signal()
    }
    
+    var transitionImage: Signal<UIImage?, NoError> {
+        return .single(self.mainDeviceContext?.output.transitionImage)
+    }
+    
     @objc private func sessionInterruptionEnded(notification: NSNotification) {
     }
     
@@ -969,6 +973,20 @@ public final class Camera {
         }
     }
     
+    public var transitionImage: Signal<UIImage?, NoError> {
+        return Signal { subscriber in
+            let disposable = MetaDisposable()
+            self.queue.async {
+                if let context = self.contextRef?.takeUnretainedValue() {
+                    disposable.set(context.transitionImage.start(next: { image in
+                        subscriber.putNext(image)
+                    }))
+                }
+            }
+            return disposable
+        }
+    }
+    
     public enum ModeChange: Equatable {
         case none
         case position
diff --git a/submodules/Camera/Sources/CameraOutput.swift b/submodules/Camera/Sources/CameraOutput.swift
index 618cc71de7..0be54d8bfd 100644
--- a/submodules/Camera/Sources/CameraOutput.swift
+++ b/submodules/Camera/Sources/CameraOutput.swift
@@ -399,6 +399,10 @@ final class CameraOutput: NSObject {
         }
     }
     
+    var transitionImage: UIImage? {
+        return self.videoRecorder?.transitionImage
+    }
+    
     private weak var masterOutput: CameraOutput?
     func processVideoRecording(_ sampleBuffer: CMSampleBuffer, fromAdditionalOutput: Bool) {
         if let videoRecorder = self.videoRecorder, videoRecorder.isRecording {
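
The transitionImage accessors above thread one value through three layers: VideoRecorder captures it, CameraOutput and CameraContext re-expose it, and the public Camera property bridges it onto the camera queue. The bridge is the interesting part: it returns a cold Signal that resolves the context on the queue and pipes the inner signal through a MetaDisposable, so a subscriber that cancels before the queue hop completes still tears down cleanly. Expressed as a standalone helper (a sketch against SwiftSignalKit; the helper name and getContext closure are illustrative, not part of this commit):

    import SwiftSignalKit

    // Sketch: expose a context-owned signal across a queue hop. Subscribing
    // is cheap on the caller's thread; resolving the context and starting
    // the inner signal happens on `queue`.
    func bridgedToQueue<T, C>(_ queue: Queue, getContext: @escaping () -> C?, inner: @escaping (C) -> Signal<T, NoError>) -> Signal<T, NoError> {
        return Signal { subscriber in
            let disposable = MetaDisposable()
            queue.async {
                if let context = getContext() {
                    // Only `next` is forwarded, mirroring the diff; the inner
                    // signal's completion is deliberately not propagated.
                    disposable.set(inner(context).start(next: { value in
                        subscriber.putNext(value)
                    }))
                }
            }
            return disposable
        }
    }
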
diff --git a/submodules/Camera/Sources/CameraRoundVideoFilter.swift b/submodules/Camera/Sources/CameraRoundVideoFilter.swift
index e41911070d..801f853291 100644
--- a/submodules/Camera/Sources/CameraRoundVideoFilter.swift
+++ b/submodules/Camera/Sources/CameraRoundVideoFilter.swift
@@ -99,6 +99,8 @@ class CameraRoundVideoFilter {
     
     private(set) var isPrepared = false
     
+    let semaphore = DispatchSemaphore(value: 1)
+    
     init(ciContext: CIContext) {
         self.ciContext = ciContext
     }
@@ -141,6 +143,9 @@ class CameraRoundVideoFilter {
     }
     
     func render(pixelBuffer: CVPixelBuffer, mirror: Bool) -> CVPixelBuffer? {
+        self.semaphore.wait()
+        defer { self.semaphore.signal() }
+        
         guard let resizeFilter = self.resizeFilter, let compositeFilter = self.compositeFilter, self.isPrepared else {
             return nil
         }
diff --git a/submodules/Camera/Sources/VideoRecorder.swift b/submodules/Camera/Sources/VideoRecorder.swift
index 9215ea20b5..7f9bcb05f3 100644
--- a/submodules/Camera/Sources/VideoRecorder.swift
+++ b/submodules/Camera/Sources/VideoRecorder.swift
@@ -35,7 +35,7 @@ private final class VideoRecorderImpl {
     private var audioInput: AVAssetWriterInput?
     
     private let ciContext: CIContext
-    private var transitionImage: UIImage?
+    fileprivate var transitionImage: UIImage?
     private var savedTransitionImage = false
     
     private var pendingAudioSampleBuffers: [CMSampleBuffer] = []
@@ -533,4 +533,8 @@ public final class VideoRecorder {
             }
         }
     }
+    
+    var transitionImage: UIImage? {
+        return self.impl.transitionImage
+    }
 }
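
In CameraRoundVideoFilter the semaphore serializes render against concurrent reconfiguration; releasing it in a defer keeps the lock balanced on every exit path, including the early guard return. VideoRecorderImpl, meanwhile, already owned a private transitionImage; the change only widens it to fileprivate so the VideoRecorder facade can republish it. The diff does not show how the impl populates it, but the savedTransitionImage flag suggests a capture-once of the first recorded frame, roughly like this (a hedged sketch; the method name and the CIContext-based conversion are assumptions, not code from this commit):

    // Sketch: capture the first frame of the recording as the transition
    // image, guarded by a capture-once flag.
    private func saveTransitionImageIfNeeded(pixelBuffer: CVPixelBuffer) {
        guard !self.savedTransitionImage else {
            return
        }
        self.savedTransitionImage = true
        
        let ciImage = CIImage(cvPixelBuffer: pixelBuffer)
        if let cgImage = self.ciContext.createCGImage(ciImage, from: ciImage.extent) {
            self.transitionImage = UIImage(cgImage: cgImage)
        }
    }
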
diff --git a/submodules/TelegramUI/Components/MediaEditor/Sources/MediaEditor.swift b/submodules/TelegramUI/Components/MediaEditor/Sources/MediaEditor.swift
index b83e888f80..6dba11a9ea 100644
--- a/submodules/TelegramUI/Components/MediaEditor/Sources/MediaEditor.swift
+++ b/submodules/TelegramUI/Components/MediaEditor/Sources/MediaEditor.swift
@@ -1736,7 +1736,7 @@ public final class MediaEditor {
     }
 }
 
-public func videoFrames(asset: AVAsset, count: Int, mirror: Bool = false) -> Signal<([UIImage], Double), NoError> {
+public func videoFrames(asset: AVAsset?, count: Int, initialPlaceholder: UIImage? = nil, initialTimestamp: Double? = nil, mirror: Bool = false) -> Signal<([UIImage], Double), NoError> {
     func blurredImage(_ image: UIImage) -> UIImage? {
         guard let image = image.cgImage else {
             return nil
@@ -1769,55 +1769,82 @@ public func videoFrames(asset: AVAsset, count: Int, mirror: Bool = false) -> Sig
     guard count > 0 else {
         return .complete()
     }
-    let scale = UIScreen.main.scale
-    let imageGenerator = AVAssetImageGenerator(asset: asset)
-    imageGenerator.maximumSize = CGSize(width: 48.0 * scale, height: 36.0 * scale)
-    imageGenerator.appliesPreferredTrackTransform = true
-    imageGenerator.requestedTimeToleranceBefore = .zero
-    imageGenerator.requestedTimeToleranceAfter = .zero
-    
+    
     var firstFrame: UIImage
-    if let cgImage = try? imageGenerator.copyCGImage(at: .zero, actualTime: nil) {
-        firstFrame = UIImage(cgImage: cgImage)
-        if let blurred = blurredImage(firstFrame) {
+    
+    var imageGenerator: AVAssetImageGenerator?
+    if let asset {
+        let scale = UIScreen.main.scale
+        
+        imageGenerator = AVAssetImageGenerator(asset: asset)
+        imageGenerator?.maximumSize = CGSize(width: 48.0 * scale, height: 36.0 * scale)
+        imageGenerator?.appliesPreferredTrackTransform = true
+        imageGenerator?.requestedTimeToleranceBefore = .zero
+        imageGenerator?.requestedTimeToleranceAfter = .zero
+    }
+    
+    if var initialPlaceholder {
+        initialPlaceholder = generateScaledImage(image: initialPlaceholder, size: initialPlaceholder.size.aspectFitted(CGSize(width: 144.0, height: 144.0)), scale: 1.0)!
+        if let blurred = blurredImage(initialPlaceholder) {
             firstFrame = blurred
+        } else {
+            firstFrame = initialPlaceholder
+        }
+    } else if let imageGenerator {
+        if let cgImage = try? imageGenerator.copyCGImage(at: .zero, actualTime: nil) {
+            firstFrame = UIImage(cgImage: cgImage)
+            if let blurred = blurredImage(firstFrame) {
+                firstFrame = blurred
+            }
+        } else {
+            firstFrame = generateSingleColorImage(size: CGSize(width: 24.0, height: 36.0), color: .black)!
         }
     } else {
         firstFrame = generateSingleColorImage(size: CGSize(width: 24.0, height: 36.0), color: .black)!
     }
-    return Signal { subscriber in
-        subscriber.putNext((Array(repeating: firstFrame, count: count), CACurrentMediaTime()))
-        
-        var timestamps: [NSValue] = []
-        let duration = asset.duration.seconds
-        let interval = duration / Double(count)
-        for i in 0 ..< count {
-            timestamps.append(NSValue(time: CMTime(seconds: Double(i) * interval, preferredTimescale: CMTimeScale(1000))))
-        }
-        
-        var updatedFrames: [UIImage] = []
-        imageGenerator.generateCGImagesAsynchronously(forTimes: timestamps) { _, image, _, _, _ in
-            if let image {
-                updatedFrames.append(UIImage(cgImage: image, scale: 1.0, orientation: mirror ? .upMirrored : .up))
-                if updatedFrames.count == count {
-                    subscriber.putNext((updatedFrames, CACurrentMediaTime()))
-                    subscriber.putCompletion()
-                } else {
-                    var tempFrames = updatedFrames
-                    for _ in 0 ..< count - updatedFrames.count {
-                        tempFrames.append(firstFrame)
+    
+    if let asset {
+        return Signal { subscriber in
+            subscriber.putNext((Array(repeating: firstFrame, count: count), initialTimestamp ?? CACurrentMediaTime()))
+            
+            var timestamps: [NSValue] = []
+            let duration = asset.duration.seconds
+            let interval = duration / Double(count)
+            for i in 0 ..< count {
+                timestamps.append(NSValue(time: CMTime(seconds: Double(i) * interval, preferredTimescale: CMTimeScale(1000))))
+            }
+            
+            var updatedFrames: [UIImage] = []
+            imageGenerator?.generateCGImagesAsynchronously(forTimes: timestamps) { _, image, _, _, _ in
+                if let image {
+                    updatedFrames.append(UIImage(cgImage: image, scale: 1.0, orientation: mirror ? .upMirrored : .up))
+                    if updatedFrames.count == count {
+                        subscriber.putNext((updatedFrames, CACurrentMediaTime()))
+                        subscriber.putCompletion()
+                    } else {
+                        var tempFrames = updatedFrames
+                        for _ in 0 ..< count - updatedFrames.count {
+                            tempFrames.append(firstFrame)
+                        }
+                        subscriber.putNext((tempFrames, CACurrentMediaTime()))
+                    }
+                } else {
+                    if let previous = updatedFrames.last {
+                        updatedFrames.append(previous)
                     }
-                    subscriber.putNext((tempFrames, CACurrentMediaTime()))
-                }
-            } else {
-                if let previous = updatedFrames.last {
-                    updatedFrames.append(previous)
                 }
             }
+            
+            return ActionDisposable {
+                imageGenerator?.cancelAllCGImageGeneration()
+            }
         }
-        
-        return ActionDisposable {
-            imageGenerator.cancelAllCGImageGeneration()
+    } else {
+        var frames: [UIImage] = []
+        for _ in 0 ..< count {
+            frames.append(firstFrame)
         }
+        return .single((frames, CACurrentMediaTime()))
    }
 }
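
With asset made optional, videoFrames can now serve a pure placeholder strip: pass nil plus an initialPlaceholder and it resolves immediately with count copies of the blurred placeholder, while initialTimestamp lets a later call reuse the first emission's timestamp so downstream crossfades stay aligned. A usage sketch (thumbnail and composition are illustrative caller-side values, not names from the diff):

    // Sketch: placeholder strip first, real thumbnails once the composition
    // exists. The placeholder signal completes after one emission, so `then`
    // switches seamlessly to the real generator.
    let frames: Signal<([UIImage], Double), NoError> = videoFrames(asset: nil, count: 12, initialPlaceholder: thumbnail)
    |> then(
        videoFrames(asset: composition, count: 12)
    )
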
diff --git a/submodules/TelegramUI/Components/MediaEditorScreen/Sources/MediaEditorScreen.swift b/submodules/TelegramUI/Components/MediaEditorScreen/Sources/MediaEditorScreen.swift
index ab48eb22a1..1d06352aa7 100644
--- a/submodules/TelegramUI/Components/MediaEditorScreen/Sources/MediaEditorScreen.swift
+++ b/submodules/TelegramUI/Components/MediaEditorScreen/Sources/MediaEditorScreen.swift
@@ -1361,6 +1361,7 @@ final class MediaEditorScreenComponent: Component {
                 component: AnyComponent(MediaScrubberComponent(
                     context: component.context,
                     style: .editor,
+                    theme: environment.theme,
                     generationTimestamp: playerState.generationTimestamp,
                     position: playerState.position,
                     minDuration: minDuration,
diff --git a/submodules/TelegramUI/Components/MediaScrubberComponent/Sources/MediaScrubberComponent.swift b/submodules/TelegramUI/Components/MediaScrubberComponent/Sources/MediaScrubberComponent.swift
index a8929b6c1f..891bfa7edf 100644
--- a/submodules/TelegramUI/Components/MediaScrubberComponent/Sources/MediaScrubberComponent.swift
+++ b/submodules/TelegramUI/Components/MediaScrubberComponent/Sources/MediaScrubberComponent.swift
@@ -74,6 +74,7 @@ public final class MediaScrubberComponent: Component {
     
     let context: AccountContext
     let style: Style
+    let theme: PresentationTheme
     let generationTimestamp: Double
     
@@ -92,6 +93,7 @@ public final class MediaScrubberComponent: Component {
     public init(
         context: AccountContext,
         style: Style,
+        theme: PresentationTheme,
         generationTimestamp: Double,
         position: Double,
         minDuration: Double,
@@ -105,6 +107,7 @@ public final class MediaScrubberComponent: Component {
     ) {
         self.context = context
         self.style = style
+        self.theme = theme
         self.generationTimestamp = generationTimestamp
         self.position = position
         self.minDuration = minDuration
@@ -121,6 +124,9 @@ public final class MediaScrubberComponent: Component {
         if lhs.context !== rhs.context {
             return false
         }
+        if lhs.theme !== rhs.theme {
+            return false
+        }
         if lhs.generationTimestamp != rhs.generationTimestamp {
             return false
         }
@@ -524,6 +530,7 @@ public final class MediaScrubberComponent: Component {
             self.trimView.isHollow = self.selectedTrackId != lowestVideoId || self.isAudioOnly
             let (leftHandleFrame, rightHandleFrame) = self.trimView.update(
                 style: component.style,
+                theme: component.theme,
                 visualInsets: trimViewVisualInsets,
                 scrubberSize: CGSize(width: trackViewWidth, height: fullTrackHeight),
                 duration: mainTrimDuration,
@@ -537,6 +544,7 @@ public final class MediaScrubberComponent: Component {
             
             let (ghostLeftHandleFrame, ghostRightHandleFrame) = self.ghostTrimView.update(
                 style: component.style,
+                theme: component.theme,
                 visualInsets: .zero,
                 scrubberSize: CGSize(width: scrubberSize.width, height: collapsedTrackHeight),
                 duration: self.duration,
@@ -1300,6 +1308,7 @@ private class TrimView: UIView {
     
     func update(
         style: MediaScrubberComponent.Style,
+        theme: PresentationTheme,
         visualInsets: UIEdgeInsets,
         scrubberSize: CGSize,
         duration: Double,
@@ -1359,8 +1368,8 @@ private class TrimView: UIView {
             effectiveHandleWidth = 16.0
             fullTrackHeight = 33.0
             capsuleOffset = 8.0
-            color = UIColor(rgb: 0x3478f6)
-            highlightColor = UIColor(rgb: 0x3478f6)
+            color = theme.chat.inputPanel.panelControlAccentColor
+            highlightColor = theme.chat.inputPanel.panelControlAccentColor
             
             if isFirstTime {
                 let handleImage = generateImage(CGSize(width: effectiveHandleWidth, height: fullTrackHeight), rotatedContext: { size, context in
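
The scrubber now receives the live PresentationTheme instead of hard-coding the 0x3478f6 accent, and the component compares themes by identity (!==) rather than by value. That is deliberate: theme objects in this codebase are immutable and shared, so pointer identity is a sufficient and much cheaper change check than a deep equality walk. The same idea in isolation (a self-contained sketch; Theme stands in for PresentationTheme):

    import UIKit

    // Sketch: identity comparison for shared, immutable theme objects.
    final class Theme {
        let accent: UIColor
        init(accent: UIColor) { self.accent = accent }
    }

    struct ScrubberState: Equatable {
        let theme: Theme
        let position: Double
        
        static func ==(lhs: ScrubberState, rhs: ScrubberState) -> Bool {
            if lhs.theme !== rhs.theme { // reference identity, not a deep walk
                return false
            }
            return lhs.position == rhs.position
        }
    }
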
diff --git a/submodules/TelegramUI/Components/VideoMessageCameraScreen/Sources/VideoMessageCameraScreen.swift b/submodules/TelegramUI/Components/VideoMessageCameraScreen/Sources/VideoMessageCameraScreen.swift
index 8ca4d566c8..7040046c32 100644
--- a/submodules/TelegramUI/Components/VideoMessageCameraScreen/Sources/VideoMessageCameraScreen.swift
+++ b/submodules/TelegramUI/Components/VideoMessageCameraScreen/Sources/VideoMessageCameraScreen.swift
@@ -173,7 +173,10 @@ private final class CameraScreenComponent: CombinedComponent {
             super.init()
             
             self.startRecording.connect({ [weak self] _ in
-                self?.startVideoRecording(pressing: true)
+                if let self, let controller = getController() {
+                    self.startVideoRecording(pressing: !controller.scheduledLock)
+                    controller.scheduledLock = false
+                }
             })
             self.stopRecording.connect({ [weak self] _ in
                 self?.stopVideoRecording()
@@ -508,6 +511,8 @@ public class VideoMessageCameraScreen: ViewController {
         }
         
         var previewStatePromise = Promise<PreviewState?>()
+        var transitioningToPreview = false
+        
         init(controller: VideoMessageCameraScreen) {
             self.controller = controller
             self.context = controller.context
@@ -736,16 +741,12 @@ public class VideoMessageCameraScreen: ViewController {
             self.results.append(result)
             self.resultsPipe.putNext(result)
             
+            self.transitioningToPreview = false
+            
             let composition = composition(with: self.results)
             controller.updatePreviewState({ _ in
                 return PreviewState(composition: composition, trimRange: nil)
             }, transition: .spring(duration: 0.4))
-            
-//            #if DEBUG
-//            if case let .video(video) = result {
-//                self.debugSaveResult(path: video.videoPath)
-//            }
-//            #endif
         }
         
         private func debugSaveResult(path: String) {
@@ -895,7 +896,7 @@ public class VideoMessageCameraScreen: ViewController {
                 CameraScreenComponent(
                     context: self.context,
                     cameraState: self.cameraState,
-                    isPreviewing: self.previewState != nil,
+                    isPreviewing: self.previewState != nil || self.transitioningToPreview,
                     getController: { [weak self] in
                         return self?.controller
                     },
@@ -1065,30 +1066,63 @@ public class VideoMessageCameraScreen: ViewController {
     public func takenRecordedData() -> Signal<RecordedVideoData?, NoError> {
         let previewState = self.node.previewStatePromise.get()
         
-        return self.currentResults
-        |> take(1)
-        |> mapToSignal { results in
-            var totalDuration: Double = 0.0
-            for result in results {
-                if case let .video(video) = result {
-                    totalDuration += video.duration
-                }
+        let count = 12
+        
+        let initialPlaceholder: Signal<UIImage?, NoError>
+        if let firstResult = self.node.results.first {
+            if case let .video(video) = firstResult {
+                initialPlaceholder = .single(video.thumbnail)
+            } else {
+                initialPlaceholder = .single(nil)
             }
-            let composition = composition(with: results)
-            return combineLatest(
-                queue: Queue.mainQueue(),
-                videoFrames(asset: composition, count: 12),
-                previewState
-            )
-            |> map { framesAndUpdateTimestamp, previewState in
+        } else {
+            initialPlaceholder = self.camera?.transitionImage ?? .single(nil)
+        }
+        
+        let immediateResult: Signal<RecordedVideoData?, NoError> = initialPlaceholder
+        |> take(1)
+        |> mapToSignal { initialPlaceholder in
+            return videoFrames(asset: nil, count: count, initialPlaceholder: initialPlaceholder)
+            |> map { framesAndUpdateTimestamp in
                 return RecordedVideoData(
-                    duration: totalDuration,
+                    duration: 1.0,
                     frames: framesAndUpdateTimestamp.0,
                     framesUpdateTimestamp: framesAndUpdateTimestamp.1,
-                    trimRange: previewState?.trimRange
+                    trimRange: nil
                 )
             }
         }
+        
+        return immediateResult
+        |> mapToSignal { immediateResult in
+            return .single(immediateResult)
+            |> then(
+                self.currentResults
+                |> take(1)
+                |> mapToSignal { results in
+                    var totalDuration: Double = 0.0
+                    for result in results {
+                        if case let .video(video) = result {
+                            totalDuration += video.duration
+                        }
+                    }
+                    let composition = composition(with: results)
+                    return combineLatest(
+                        queue: Queue.mainQueue(),
+                        videoFrames(asset: composition, count: count, initialTimestamp: immediateResult?.framesUpdateTimestamp),
+                        previewState
+                    )
+                    |> map { framesAndUpdateTimestamp, previewState in
+                        return RecordedVideoData(
+                            duration: totalDuration,
+                            frames: framesAndUpdateTimestamp.0,
+                            framesUpdateTimestamp: framesAndUpdateTimestamp.1,
+                            trimRange: previewState?.trimRange
+                        )
+                    }
+                }
+            )
+        }
     }
     
     public init(
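
takenRecordedData now emits twice: first a synthesized RecordedVideoData built from the recording's thumbnail (or the camera's transitionImage when no result has landed yet), then the real data computed from the composed asset, with then() stitching the two phases together. The shape of that pattern, reduced to its essentials (a sketch; placeholder and expensive stand in for the two signals):

    // Sketch: emit a cheap placeholder immediately, then replace it with
    // the expensive value once it is available.
    func placeholderThenReal<T>(placeholder: Signal<T, NoError>, expensive: Signal<T, NoError>) -> Signal<T, NoError> {
        return placeholder
        |> take(1)
        |> mapToSignal { value in
            return .single(value)
            |> then(expensive)
        }
    }
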
@@ -1219,13 +1253,21 @@ public class VideoMessageCameraScreen: ViewController {
     private var waitingForNextResult = false
     public func stopVideoRecording() -> Bool {
         self.waitingForNextResult = true
+        self.node.transitioningToPreview = true
+        self.node.requestUpdateLayout(transition: .spring(duration: 0.4))
+        
         self.node.stopRecording.invoke(Void())
+        
         return true
     }
-    
+    
+    fileprivate var scheduledLock = false
     public func lockVideoRecording() {
-        self.updateCameraState({ $0.updatedRecording(.handsFree) }, transition: .spring(duration: 0.4))
+        if case .none = self.cameraState.recording {
+            self.scheduledLock = true
+        } else {
+            self.updateCameraState({ $0.updatedRecording(.handsFree) }, transition: .spring(duration: 0.4))
+        }
         
         self.node.maybePresentViewOnceTooltip()
     }
diff --git a/submodules/TelegramUI/Sources/ChatRecordingPreviewInputPanelNode.swift b/submodules/TelegramUI/Sources/ChatRecordingPreviewInputPanelNode.swift
index d437a299b7..4f2e3d2935 100644
--- a/submodules/TelegramUI/Sources/ChatRecordingPreviewInputPanelNode.swift
+++ b/submodules/TelegramUI/Sources/ChatRecordingPreviewInputPanelNode.swift
@@ -304,6 +304,7 @@ final class ChatRecordingPreviewInputPanelNode: ChatInputPanelNode {
                 MediaScrubberComponent(
                     context: context,
                     style: .videoMessage,
+                    theme: interfaceState.theme,
                    generationTimestamp: 0,
                     position: 0,
                     minDuration: 1.0,
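
scheduledLock fixes a race: the lock gesture can fire before the camera has actually started recording (the start request first hops to the camera queue), and previously the hands-free transition was silently dropped. Now an early lock is remembered and consumed by the startRecording handler, which starts directly in hands-free mode by passing pressing: false. The control flow in isolation (a sketch; the names mirror the diff but the type itself is illustrative):

    // Sketch: coalesce an early lock gesture with an asynchronous
    // recording start.
    final class RecordingLockCoordinator {
        private var scheduledLock = false
        private var isRecording = false
        
        // From the lock gesture; may arrive before recording starts.
        func lock(transitionToHandsFree: () -> Void) {
            if self.isRecording {
                transitionToHandsFree()
            } else {
                self.scheduledLock = true // apply once recording starts
            }
        }
        
        // From the camera, once recording has actually begun.
        func recordingDidStart(start: (_ pressing: Bool) -> Void) {
            self.isRecording = true
            start(!self.scheduledLock) // a scheduled lock means hands-free
            self.scheduledLock = false
        }
    }
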
diff --git a/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift b/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
index 42a83038ef..28acf63db3 100644
--- a/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
+++ b/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
@@ -2094,7 +2094,6 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                     size: audioRecordingCancelIndicator.bounds.size)
                 audioRecordingCancelIndicator.frame = audioRecordingCancelIndicatorFrame
                 if self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
-                    //let progress = 1 - (self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold) / 80
                     let progress: CGFloat = max(0.0, min(1.0, (audioRecordingCancelIndicatorFrame.minX - 100.0) / 10.0))
                     audioRecordingCancelIndicator.alpha = progress
                 } else {
@@ -2145,6 +2144,8 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                     audioRecordingTimeNode.layer.animateAlpha(from: 0, to: 1, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
                 }
                 
+                let dotFrame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: audioRecordingTimeNode.frame.midY - 20), size: CGSize(width: 40.0, height: 40))
+                
                 var animateDotAppearing = false
                 let audioRecordingDotNode: AnimationNode
                 if let currentAudioRecordingDotNode = self.audioRecordingDotNode, !currentAudioRecordingDotNode.didPlay {
@@ -2152,9 +2153,12 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                 } else {
                     self.audioRecordingDotNode?.removeFromSupernode()
                     audioRecordingDotNode = AnimationNode(animation: "BinRed")
+                    self.audioRecordingDotNode = audioRecordingDotNode
                     self.audioRecordingDotNodeDismissed = false
                     self.clippingNode.insertSubnode(audioRecordingDotNode, belowSubnode: self.menuButton)
+                    audioRecordingDotNode.frame = dotFrame
+                    
                     self.animatingBinNode?.removeFromSupernode()
                     self.animatingBinNode = nil
                 }
@@ -2163,10 +2167,14 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                 if let mediaRecordingState = mediaRecordingState, case .waitingForPreview = mediaRecordingState {
                     animateDotAppearing = false
                 }
+                
+                audioRecordingDotNode.bounds = CGRect(origin: .zero, size: dotFrame.size)
+                audioRecordingDotNode.position = dotFrame.center
                 
-                audioRecordingDotNode.frame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: audioRecordingTimeNode.frame.midY - 20), size: CGSize(width: 40.0, height: 40))
-                if animateDotAppearing || animateCancelSlideIn {
-                    audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
+                if animateDotAppearing {
+                    Queue.mainQueue().justDispatch {
+                        audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
+                    }
                     audioRecordingTimeNode.started = { [weak audioRecordingDotNode] in
                         if let audioRecordingDotNode = audioRecordingDotNode, audioRecordingDotNode.layer.animation(forKey: "recording") == nil {
                             audioRecordingDotNode.layer.animateAlpha(from: CGFloat(audioRecordingDotNode.layer.presentation()?.opacity ?? 0), to: 1, duration: 0.15, delay: 0, completion: { [weak audioRecordingDotNode] finished in
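
The recording-dot change at the end is a classic UIKit fix: the 0.3 to 1 scale animation leaves a non-identity transform on the layer while it runs, and setting frame on a transformed layer is undefined, so the node is now placed via bounds plus position, which are transform-safe. The scale animation itself is deferred one runloop step (Queue.mainQueue().justDispatch) so it attaches after the layout pass that positioned the node. The safe-placement idiom on its own:

    import UIKit

    // Sketch: position a layer that may be mid-scale-animation. Setting
    // `frame` is undefined under a non-identity transform; bounds +
    // position is always well-defined.
    func place(_ layer: CALayer, in rect: CGRect) {
        layer.bounds = CGRect(origin: .zero, size: rect.size)
        layer.position = CGPoint(x: rect.midX, y: rect.midY)
    }
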