diff --git a/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h b/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h
index 0485923d6f..eb81ffdcd6 100644
--- a/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h
+++ b/submodules/LegacyComponents/PublicHeaders/LegacyComponents/TGVideoMessageCaptureController.h
@@ -19,6 +19,7 @@
 @property (nonatomic, copy) id (^requestActivityHolder)();
 @property (nonatomic, copy) void (^micLevel)(CGFloat level);
+@property (nonatomic, copy) void (^onDuration)(NSTimeInterval duration);
 @property (nonatomic, copy) void(^finishedWithVideo)(NSURL *videoURL, UIImage *previewImage, NSUInteger fileSize, NSTimeInterval duration, CGSize dimensions, id liveUploadData, TGVideoEditAdjustments *adjustments, bool, int32_t);
 @property (nonatomic, copy) void(^onDismiss)(bool isAuto);
 @property (nonatomic, copy) void(^onStop)(void);
diff --git a/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m b/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m
index 329de8d797..4052337a2c 100644
--- a/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m
+++ b/submodules/LegacyComponents/Sources/TGVideoMessageCaptureController.m
@@ -683,6 +683,9 @@ typedef enum
     if (!_capturePipeline.isRecording)
         return false;
     
+    if (_capturePipeline.videoDuration < 0.33)
+        return false;
+    
     if ([self.view.window isKindOfClass:[TGVideoMessageCaptureControllerWindow class]]) {
         ((TGVideoMessageCaptureControllerWindow *)self.view.window).locked = false;
     }
@@ -1045,6 +1048,7 @@
 {
     [_controlsView recordingStarted];
    [_controlsView setDurationString:@"0:00,00"];
+    if (self.onDuration) self.onDuration(0); // guarded: calling a nil block would crash
     
     _audioRecordingDurationSeconds = 0;
     _audioRecordingDurationMilliseconds = 0.0;
@@ -1078,6 +1082,7 @@
     }
     else
     {
+        if (self.onDuration) self.onDuration(recordingDuration); // guarded: calling a nil block would crash
         _audioRecordingDurationSeconds = currentDurationSeconds;
         _audioRecordingDurationMilliseconds = currentDurationMilliseconds;
         [_controlsView setDurationString:[[NSString alloc] initWithFormat:@"%d:%02d,%02d", (int)_audioRecordingDurationSeconds / 60, (int)_audioRecordingDurationSeconds % 60, (int)_audioRecordingDurationMilliseconds]];
diff --git a/submodules/LegacyComponents/Sources/TGVideoMessageControls.m b/submodules/LegacyComponents/Sources/TGVideoMessageControls.m
index 4062339754..c4b6fc917c 100644
--- a/submodules/LegacyComponents/Sources/TGVideoMessageControls.m
+++ b/submodules/LegacyComponents/Sources/TGVideoMessageControls.m
@@ -160,7 +160,7 @@ static CGRect viewFrame(UIView *view)
     CGRect slideToCancelArrowFrame = viewFrame(_slideToCancelArrow);
     setViewFrame(_slideToCancelArrow, CGRectMake(CGFloor((self.frame.size.width - _slideToCancelLabel.frame.size.width) / 2.0f) - slideToCancelArrowFrame.size.width - 7.0f, CGFloor((self.frame.size.height - _slideToCancelLabel.frame.size.height) / 2.0f), slideToCancelArrowFrame.size.width, slideToCancelArrowFrame.size.height));
     _slideToCancelArrow.alpha = 0.0f;
-    [self addSubview:_slideToCancelArrow];
+//    [self addSubview:_slideToCancelArrow];
     
     _slideToCancelArrow.transform = CGAffineTransformMakeTranslation(hideLeftOffset, 0.0f);
     _slideToCancelLabel.transform = CGAffineTransformMakeTranslation(hideLeftOffset, 0.0f);
@@ -185,11 +185,11 @@ static CGRect viewFrame(UIView *view)
     _recordDurationLabel.text = @"0:00,00";
     
-    if (_recordIndicatorView.superview == nil)
-        [self addSubview:_recordIndicatorView];
+//    if (_recordIndicatorView.superview == nil)
+//        [self addSubview:_recordIndicatorView];
     
     [_recordIndicatorView.layer removeAllAnimations];
     
-    if (_recordDurationLabel.superview == nil)
-        [self addSubview:_recordDurationLabel];
+//    if (_recordDurationLabel.superview == nil)
+//        [self addSubview:_recordDurationLabel];
     
     [_recordDurationLabel.layer removeAllAnimations];
     
     _slideToCancelArrow.transform = CGAffineTransformMakeTranslation(300.0f, 0.0f);
@@ -211,7 +211,7 @@ static CGRect viewFrame(UIView *view)
     if (!isAlreadyLocked)
     {
-        if (_slideToCancelLabel.superview == nil)
-            [self addSubview:_slideToCancelLabel];
+//        if (_slideToCancelLabel.superview == nil)
+//            [self addSubview:_slideToCancelLabel];
        
         [UIView animateWithDuration:0.18 delay:0.0 options:animationCurveOption animations:^
         {
@@ -445,8 +445,7 @@ static CGRect viewFrame(UIView *view)
     [UIView animateWithDuration:0.2 delay:0.0 options:UIViewAnimationOptionBeginFromCurrentState | animationCurveOption animations:^
     {
-        CGAffineTransform transform = CGAffineTransformMakeTranslation(0.0f, -22.0f);
-        transform = CGAffineTransformScale(transform, 0.25f, 0.25f);
+        CGAffineTransform transform = CGAffineTransformMakeScale(0.25f, 0.25f);
         _cancelButton.transform = transform;
         _cancelButton.alpha = 0.0f;
     } completion:nil];
diff --git a/submodules/TelegramUI/Sources/BlobView.swift b/submodules/TelegramUI/Sources/BlobView.swift
index 37b84db05a..38e831f16d 100644
--- a/submodules/TelegramUI/Sources/BlobView.swift
+++ b/submodules/TelegramUI/Sources/BlobView.swift
@@ -16,27 +16,27 @@ final class VoiceBlobView: UIView, TGModernConversationInputMicButtonDecoration
         maxRandomness: 0.5,
         minSpeed: 0.2,
         maxSpeed: 0.6,
-        minScale: 0.56,
-        maxScale: 0.56,
-        scaleSpeed: 0
+        minScale: 0.45,
+        maxScale: 0.55,
+        scaleSpeed: 0.2
     )
     private let mediumBlob = BlobView(
         pointsCount: 8,
         minRandomness: 1,
-        maxRandomness: 2,
+        maxRandomness: 1,
         minSpeed: 3,
-        maxSpeed: 8,
-        minScale: 0.67,
-        maxScale: 0.8,
+        maxSpeed: 7,
+        minScale: 0.55,
+        maxScale: 0.9,
         scaleSpeed: 0.2
     )
     private let bigBlob = BlobView(
         pointsCount: 8,
         minRandomness: 1,
-        maxRandomness: 2,
+        maxRandomness: 1,
         minSpeed: 3,
-        maxSpeed: 8,
-        minScale: 0.67,
+        maxSpeed: 7,
+        minScale: 0.55,
         maxScale: 1,
         scaleSpeed: 0.2
     )
@@ -105,11 +105,16 @@ final class BlobView: UIView {
         didSet {
             speedLevel = max(level, speedLevel)
             scaleLevel = max(level, scaleLevel)
+            
+            if abs(scaleLevel - lastScaleLevel) > 0.4 {
+                animateToNewScale()
+            }
         }
     }
     
     private var speedLevel: CGFloat = 0
     private var scaleLevel: CGFloat = 0
+    private var lastScaleLevel: CGFloat = 0
     
     private let shapeLayer: CAShapeLayer = {
         let layer = CAShapeLayer()
@@ -183,12 +188,15 @@
     }
     
     func animateToNewScale() {
+        let isDownscale = lastScaleLevel > scaleLevel
+        lastScaleLevel = scaleLevel
+        
         shapeLayer.pop_removeAnimation(forKey: "scale")
         
         let currentScale = minScale + (maxScale - minScale) * scaleLevel
         let scaleAnimation = POPBasicAnimation(propertyNamed: kPOPLayerScaleXY)!
         scaleAnimation.toValue = CGPoint(x: currentScale, y: currentScale)
-        scaleAnimation.duration = CFTimeInterval(scaleSpeed)
+        scaleAnimation.duration = isDownscale ? 0.45 : CFTimeInterval(scaleSpeed)
         scaleAnimation.completionBlock = { [weak self] animation, finished in
             if finished {
                 self?.animateToNewScale()
diff --git a/submodules/TelegramUI/Sources/ChatController.swift b/submodules/TelegramUI/Sources/ChatController.swift
index 747bca7a88..cafd8c180f 100644
--- a/submodules/TelegramUI/Sources/ChatController.swift
+++ b/submodules/TelegramUI/Sources/ChatController.swift
@@ -2557,6 +2557,8 @@ public final class ChatControllerImpl: TelegramBaseController, ChatController, G
                     videoRecorder.onDismiss = {
                         if let strongSelf = self {
+                            strongSelf.beginMediaRecordingRequestId += 1
+                            strongSelf.lockMediaRecordingRequestId = nil
                             strongSelf.videoRecorder.set(.single(nil))
                         }
                     }
@@ -7289,9 +7291,13 @@ public final class ChatControllerImpl: TelegramBaseController, ChatController, G
                     self.audioRecorder.set(.single(nil))
                 } else if let videoRecorderValue = self.videoRecorderValue {
                     if case .send = updatedAction {
+                        self.chatDisplayNode.updateRecordedMediaDeleted(false)
                         videoRecorderValue.completeVideo()
                         self.videoRecorder.set(.single(nil))
                     } else {
+                        if case .dismiss = updatedAction {
+                            self.chatDisplayNode.updateRecordedMediaDeleted(true)
+                        }
                         if case .preview = updatedAction, videoRecorderValue.stopVideo() {
                             self.updateChatPresentationInterfaceState(animated: true, interactive: true, {
                                 $0.updatedInputTextPanelState { panelState in
diff --git a/submodules/TelegramUI/Sources/ChatTextInputAudioRecordingTimeNode.swift b/submodules/TelegramUI/Sources/ChatTextInputAudioRecordingTimeNode.swift
index b3c17a2531..c4a015cc72 100644
--- a/submodules/TelegramUI/Sources/ChatTextInputAudioRecordingTimeNode.swift
+++ b/submodules/TelegramUI/Sources/ChatTextInputAudioRecordingTimeNode.swift
@@ -53,6 +53,28 @@
         }
     }
     
+    private var durationDisposable: MetaDisposable?
+    
+    var videoRecordingStatus: InstantVideoControllerRecordingStatus? {
+        didSet {
+            if self.videoRecordingStatus !== oldValue {
+                if self.durationDisposable == nil {
+                    durationDisposable = MetaDisposable()
+                }
+                
+                if let videoRecordingStatus = self.videoRecordingStatus {
+                    self.durationDisposable?.set(videoRecordingStatus.duration.start(next: { [weak self] duration in
+                        Queue.mainQueue().async { [weak self] in
+                            self?.timestamp = duration
+                        }
+                    }))
+                } else if self.audioRecorder == nil {
+                    self.durationDisposable?.set(nil)
+                }
+            }
+        }
+    }
+    
     private var theme: PresentationTheme
     
     init(theme: PresentationTheme) {
diff --git a/submodules/TelegramUI/Sources/ChatTextInputMediaRecordingButton.swift b/submodules/TelegramUI/Sources/ChatTextInputMediaRecordingButton.swift
index 4077474668..79621153b7 100644
--- a/submodules/TelegramUI/Sources/ChatTextInputMediaRecordingButton.swift
+++ b/submodules/TelegramUI/Sources/ChatTextInputMediaRecordingButton.swift
@@ -238,7 +238,7 @@ final class ChatTextInputMediaRecordingButton: TGModernConversationInputMicButto
     }
     
     private lazy var micDecoration: (UIView & TGModernConversationInputMicButtonDecoration) = {
-        let blobView = VoiceBlobView(frame: CGRect(origin: CGPoint(), size: CGSize(width: 180.0, height: 180.0)))
+        let blobView = VoiceBlobView(frame: CGRect(origin: CGPoint(), size: CGSize(width: 220.0, height: 220.0)))
         blobView.setColor(self.theme.chat.inputPanel.actionControlFillColor)
         return blobView
     }()
diff --git a/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift b/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
index 4f5421fc17..6647a84968 100644
--- a/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
+++ b/submodules/TelegramUI/Sources/ChatTextInputPanelNode.swift
@@ -908,6 +908,68 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate {
         var audioRecordingItemsAlpha: CGFloat = 1
         if let mediaRecordingState = interfaceState.inputTextPanelState.mediaRecordingState {
             audioRecordingItemsAlpha = 0
+            
+            let audioRecordingInfoContainerNode: ASDisplayNode
+            if let currentAudioRecordingInfoContainerNode = self.audioRecordingInfoContainerNode {
+                audioRecordingInfoContainerNode = currentAudioRecordingInfoContainerNode
+            } else {
+                audioRecordingInfoContainerNode = ASDisplayNode()
+                self.audioRecordingInfoContainerNode = audioRecordingInfoContainerNode
+                self.insertSubnode(audioRecordingInfoContainerNode, at: 0)
+            }
+            
+            var animateTimeSlideIn = false
+            let audioRecordingTimeNode: ChatTextInputAudioRecordingTimeNode
+            if let currentAudioRecordingTimeNode = self.audioRecordingTimeNode {
+                audioRecordingTimeNode = currentAudioRecordingTimeNode
+            } else {
+                audioRecordingTimeNode = ChatTextInputAudioRecordingTimeNode(theme: interfaceState.theme)
+                self.audioRecordingTimeNode = audioRecordingTimeNode
+                audioRecordingInfoContainerNode.addSubnode(audioRecordingTimeNode)
+                
+                if transition.isAnimated {
+                    animateTimeSlideIn = true
+                }
+            }
+            
+            
+            var animateCancelSlideIn = false
+            let audioRecordingCancelIndicator: ChatTextInputAudioRecordingCancelIndicator
+            if let currentAudioRecordingCancelIndicator = self.audioRecordingCancelIndicator {
+                audioRecordingCancelIndicator = currentAudioRecordingCancelIndicator
+            } else {
+                animateCancelSlideIn = transition.isAnimated
+                
+                audioRecordingCancelIndicator = ChatTextInputAudioRecordingCancelIndicator(theme: interfaceState.theme, strings: interfaceState.strings, cancel: { [weak self] in
+                    self?.interfaceInteraction?.finishMediaRecording(.dismiss)
+                })
+                self.audioRecordingCancelIndicator = audioRecordingCancelIndicator
+                self.insertSubnode(audioRecordingCancelIndicator, at: 0)
+            }
+            
+            let isLocked = mediaRecordingState.isLocked
+            var hideInfo = false
+            
+            switch mediaRecordingState {
+                case let .audio(recorder, _):
+                    self.actionButtons.micButton.audioRecorder = recorder
+                    audioRecordingTimeNode.audioRecorder = recorder
+                case let .video(status, _):
+                    switch status {
+                        case let .recording(recordingStatus):
+                            audioRecordingTimeNode.videoRecordingStatus = recordingStatus
+                            self.actionButtons.micButton.videoRecordingStatus = recordingStatus
+                            if isLocked {
+                                audioRecordingCancelIndicator.layer.animateAlpha(from: audioRecordingCancelIndicator.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                            }
+                        case .editing:
+                            audioRecordingTimeNode.videoRecordingStatus = nil
+                            self.actionButtons.micButton.videoRecordingStatus = nil
+                            hideMicButton = true
+                            hideInfo = true
+                    }
+            }
+            
             transition.updateAlpha(layer: self.textInputBackgroundNode.layer, alpha: 0.0)
             if let textInputNode = self.textInputNode {
                 transition.updateAlpha(node: textInputNode, alpha: 0.0)
@@ -916,150 +978,105 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate {
                 transition.updateAlpha(layer: button.layer, alpha: 0.0)
             }
             
-            switch mediaRecordingState {
-                case let .audio(recorder, isLocked):
-                    self.actionButtons.micButton.audioRecorder = recorder
-                    let audioRecordingInfoContainerNode: ASDisplayNode
-                    if let currentAudioRecordingInfoContainerNode = self.audioRecordingInfoContainerNode {
-                        audioRecordingInfoContainerNode = currentAudioRecordingInfoContainerNode
-                    } else {
-                        audioRecordingInfoContainerNode = ASDisplayNode()
-                        self.audioRecordingInfoContainerNode = audioRecordingInfoContainerNode
-                        self.insertSubnode(audioRecordingInfoContainerNode, at: 0)
-                    }
-                    
-                    var animateCancelSlideIn = false
-                    let audioRecordingCancelIndicator: ChatTextInputAudioRecordingCancelIndicator
-                    if let currentAudioRecordingCancelIndicator = self.audioRecordingCancelIndicator {
-                        audioRecordingCancelIndicator = currentAudioRecordingCancelIndicator
-                    } else {
-                        animateCancelSlideIn = transition.isAnimated
+            let cancelTransformThreshold: CGFloat = 8.0
+            
+            let indicatorTranslation = max(0.0, self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold)
+            
+            audioRecordingCancelIndicator.frame = CGRect(
+                origin: CGPoint(
+                    x: leftInset + floor((baseWidth - audioRecordingCancelIndicator.bounds.size.width - indicatorTranslation) / 2.0),
+                    y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingCancelIndicator.bounds.size.height) / 2.0)),
+                size: audioRecordingCancelIndicator.bounds.size)
+            if self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
+                let progress = 1 - (self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold) / 80
+                audioRecordingCancelIndicator.alpha = progress
+            } else {
+                audioRecordingCancelIndicator.alpha = 1
+            }
+            
+            if animateCancelSlideIn {
+                let position = audioRecordingCancelIndicator.layer.position
+                audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: width + audioRecordingCancelIndicator.bounds.size.width, y: position.y), to: position, duration: 0.4, timingFunction: kCAMediaTimingFunctionSpring)
+            }
+            
+            audioRecordingCancelIndicator.updateIsDisplayingCancel(isLocked, animated: !animateCancelSlideIn)
+            
+            if isLocked || self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
+                var deltaOffset: CGFloat = 0.0
+                if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") != nil, let presentationLayer = audioRecordingCancelIndicator.layer.presentation() {
+                    let translation = CGPoint(x: presentationLayer.transform.m41, y: presentationLayer.transform.m42)
+                    deltaOffset = translation.x
+                }
+                audioRecordingCancelIndicator.layer.removeAnimation(forKey: "slide_juggle")
+                if !deltaOffset.isZero {
+                    audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: deltaOffset, y: 0.0), to: CGPoint(), duration: 0.3, additive: true)
+                }
+            } else if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") == nil {
+                let slideJuggleAnimation = CABasicAnimation(keyPath: "transform")
+                slideJuggleAnimation.toValue = CATransform3DMakeTranslation(-6, 0, 0)
+                slideJuggleAnimation.duration = 1
+                slideJuggleAnimation.timingFunction = CAMediaTimingFunction(name: CAMediaTimingFunctionName.easeInEaseOut)
+                slideJuggleAnimation.autoreverses = true
+                slideJuggleAnimation.repeatCount = Float.infinity
+                audioRecordingCancelIndicator.layer.add(slideJuggleAnimation, forKey: "slide_juggle")
+            }
+            
+            let audioRecordingTimeSize = audioRecordingTimeNode.measure(CGSize(width: 200.0, height: 100.0))
+            
+            let cancelMinX = audioRecordingCancelIndicator.alpha > 0.5 ? audioRecordingCancelIndicator.frame.minX : width
+            
+            audioRecordingInfoContainerNode.frame = CGRect(
+                origin: CGPoint(
+                    x: min(leftInset, cancelMinX - audioRecordingTimeSize.width - 8.0 - 28.0),
+                    y: 0.0
+                ),
+                size: CGSize(width: baseWidth, height: panelHeight)
+            )
+            
+            audioRecordingTimeNode.frame = CGRect(origin: CGPoint(x: 40.0, y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingTimeSize.height) / 2.0)), size: audioRecordingTimeSize)
+            if animateTimeSlideIn {
+                let position = audioRecordingTimeNode.layer.position
+                audioRecordingTimeNode.layer.animatePosition(from: CGPoint(x: position.x - 10.0, y: position.y), to: position, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
+                audioRecordingTimeNode.layer.animateAlpha(from: 0, to: 1, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
+            }
+            
+            var animateDotAppearing = false
+            let audioRecordingDotNode: AnimationNode
+            if let currentAudioRecordingDotNode = self.audioRecordingDotNode, !currentAudioRecordingDotNode.played {
+                audioRecordingDotNode = currentAudioRecordingDotNode
+            } else {
+                self.audioRecordingDotNode?.removeFromSupernode()
+                audioRecordingDotNode = AnimationNode(animation: "voicebin")
+                self.audioRecordingDotNode = audioRecordingDotNode
+                self.addSubnode(audioRecordingDotNode)
+            }
+            
+            animateDotAppearing = transition.isAnimated && !isLocked && !hideInfo
+            
+            audioRecordingDotNode.frame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: panelHeight - 44 + 1), size: CGSize(width: 40.0, height: 40))
+            if animateDotAppearing {
+                audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
+                audioRecordingDotNode.layer.animateAlpha(from: 0, to: 1, duration: 0.15, delay: 0, completion: { [weak audioRecordingDotNode] finished in
+                    if finished {
+                        let animation = CAKeyframeAnimation(keyPath: "opacity")
+                        animation.values = [1.0 as NSNumber, 1.0 as NSNumber, 0.0 as NSNumber]
+                        animation.keyTimes = [0.0 as NSNumber, 0.4546 as NSNumber, 0.9091 as NSNumber, 1 as NSNumber]
+                        animation.duration = 0.5
+                        animation.autoreverses = true
+                        animation.repeatCount = Float.infinity
 
-                        audioRecordingCancelIndicator = ChatTextInputAudioRecordingCancelIndicator(theme: interfaceState.theme, strings: interfaceState.strings, cancel: { [weak self] in
-                            self?.interfaceInteraction?.finishMediaRecording(.dismiss)
-                        })
-                        self.audioRecordingCancelIndicator = audioRecordingCancelIndicator
-                        self.insertSubnode(audioRecordingCancelIndicator, at: 0)
-                    }
-                    
-                    let cancelTransformThreshold: CGFloat = 8.0
-                    
-                    let indicatorTranslation = max(0.0, self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold)
-                    
-                    audioRecordingCancelIndicator.frame = CGRect(
-                        origin: CGPoint(
-                            x: leftInset + floor((baseWidth - audioRecordingCancelIndicator.bounds.size.width - indicatorTranslation) / 2.0),
-                            y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingCancelIndicator.bounds.size.height) / 2.0)),
-                        size: audioRecordingCancelIndicator.bounds.size)
-                    if self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
-                        let progress = 1 - (self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold) / 80
-                        audioRecordingCancelIndicator.alpha = progress
-                    } else {
-                        audioRecordingCancelIndicator.alpha = 1
-                    }
-                    
-                    if animateCancelSlideIn {
-                        let position = audioRecordingCancelIndicator.layer.position
-                        audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: width + audioRecordingCancelIndicator.bounds.size.width, y: position.y), to: position, duration: 0.4, timingFunction: kCAMediaTimingFunctionSpring)
-                    }
-                    
-                    audioRecordingCancelIndicator.updateIsDisplayingCancel(isLocked, animated: !animateCancelSlideIn)
-                    
-                    if isLocked || self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
-                        var deltaOffset: CGFloat = 0.0
-                        if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") != nil, let presentationLayer = audioRecordingCancelIndicator.layer.presentation() {
-                            let translation = CGPoint(x: presentationLayer.transform.m41, y: presentationLayer.transform.m42)
-                            deltaOffset = translation.x
-                        }
-                        audioRecordingCancelIndicator.layer.removeAnimation(forKey: "slide_juggle")
-                        if !deltaOffset.isZero {
-                            audioRecordingCancelIndicator.layer.animatePosition(from: CGPoint(x: deltaOffset, y: 0.0), to: CGPoint(), duration: 0.3, additive: true)
-                        }
-                    } else if audioRecordingCancelIndicator.layer.animation(forKey: "slide_juggle") == nil {
-                        let slideJuggleAnimation = CABasicAnimation(keyPath: "transform")
-                        slideJuggleAnimation.toValue = CATransform3DMakeTranslation(-6, 0, 0)
-                        slideJuggleAnimation.duration = 1
-                        slideJuggleAnimation.timingFunction = CAMediaTimingFunction(name: CAMediaTimingFunctionName.easeInEaseOut)
-                        slideJuggleAnimation.autoreverses = true
-                        slideJuggleAnimation.repeatCount = Float.infinity
-                        audioRecordingCancelIndicator.layer.add(slideJuggleAnimation, forKey: "slide_juggle")
-                    }
-                    
-                    var animateTimeSlideIn = false
-                    let audioRecordingTimeNode: ChatTextInputAudioRecordingTimeNode
-                    if let currentAudioRecordingTimeNode = self.audioRecordingTimeNode {
-                        audioRecordingTimeNode = currentAudioRecordingTimeNode
-                    } else {
-                        audioRecordingTimeNode = ChatTextInputAudioRecordingTimeNode(theme: interfaceState.theme)
-                        self.audioRecordingTimeNode = audioRecordingTimeNode
-                        audioRecordingInfoContainerNode.addSubnode(audioRecordingTimeNode)
-                        
-                        if transition.isAnimated {
-                            animateTimeSlideIn = true
-                        }
-                    }
-                    
-                    let audioRecordingTimeSize = audioRecordingTimeNode.measure(CGSize(width: 200.0, height: 100.0))
-                    
-                    let cancelMinX = audioRecordingCancelIndicator.alpha > 0.5 ? audioRecordingCancelIndicator.frame.minX : width
-                    
-                    audioRecordingInfoContainerNode.frame = CGRect(
-                        origin: CGPoint(
-                            x: min(leftInset, cancelMinX - audioRecordingTimeSize.width - 8.0 - 28.0),
-                            y: 0.0
-                        ),
-                        size: CGSize(width: baseWidth, height: panelHeight)
-                    )
-                    
-                    audioRecordingTimeNode.frame = CGRect(origin: CGPoint(x: 40.0, y: panelHeight - minimalHeight + floor((minimalHeight - audioRecordingTimeSize.height) / 2.0)), size: audioRecordingTimeSize)
-                    if animateTimeSlideIn {
-                        let position = audioRecordingTimeNode.layer.position
-                        audioRecordingTimeNode.layer.animatePosition(from: CGPoint(x: position.x - 10.0, y: position.y), to: position, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
-                        audioRecordingTimeNode.layer.animateAlpha(from: 0, to: 1, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
-                    }
-                    
-                    audioRecordingTimeNode.audioRecorder = recorder
-                    
-                    var animateDotAppearing = false
-                    let audioRecordingDotNode: AnimationNode
-                    if let currentAudioRecordingDotNode = self.audioRecordingDotNode, !currentAudioRecordingDotNode.played {
-                        audioRecordingDotNode = currentAudioRecordingDotNode
-                    } else {
-                        self.audioRecordingDotNode?.removeFromSupernode()
-                        audioRecordingDotNode = AnimationNode(animation: "voicebin")
-                        self.audioRecordingDotNode = audioRecordingDotNode
-                        self.addSubnode(audioRecordingDotNode)
-                    }
-                    
-                    animateDotAppearing = transition.isAnimated && !isLocked
-                    
-                    audioRecordingDotNode.frame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: panelHeight - 44 + 1), size: CGSize(width: 40.0, height: 40))
-                    if animateDotAppearing {
-                        audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
-                        audioRecordingDotNode.layer.animateAlpha(from: 0, to: 1, duration: 0.15, delay: 0, completion: { [weak audioRecordingDotNode] finished in
-                            if finished {
-                                let animation = CAKeyframeAnimation(keyPath: "opacity")
-                                animation.values = [1.0 as NSNumber, 1.0 as NSNumber, 0.0 as NSNumber]
-                                animation.keyTimes = [0.0 as NSNumber, 0.4546 as NSNumber, 0.9091 as NSNumber, 1 as NSNumber]
-                                animation.duration = 0.5
-                                animation.autoreverses = true
-                                animation.repeatCount = Float.infinity
-                                
-                                audioRecordingDotNode?.layer.add(animation, forKey: "recording")
-                            }
-                        })
-                        
-                        self.attachmentButton.layer.animateAlpha(from: 1, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
-                        self.attachmentButton.layer.animateScale(from: 1, to: 0.3, duration: 0.15, delay: 0, removeOnCompletion: false)
-                    }
-                case let .video(status, _):
-                    switch status {
-                        case let .recording(recordingStatus):
-                            self.actionButtons.micButton.videoRecordingStatus = recordingStatus
-                        case .editing:
-                            self.actionButtons.micButton.videoRecordingStatus = nil
-                            hideMicButton = true
+                        audioRecordingDotNode?.layer.add(animation, forKey: "recording")
                     }
                 })
+                
+                self.attachmentButton.layer.animateAlpha(from: 1, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                self.attachmentButton.layer.animateScale(from: 1, to: 0.3, duration: 0.15, delay: 0, removeOnCompletion: false)
+            }
+            
+            if hideInfo {
+                audioRecordingDotNode.layer.animateAlpha(from: audioRecordingDotNode.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                audioRecordingTimeNode.layer.animateAlpha(from: audioRecordingTimeNode.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+                audioRecordingCancelIndicator.layer.animateAlpha(from: audioRecordingCancelIndicator.alpha, to: 0, duration: 0.15, delay: 0, removeOnCompletion: false)
+            }
         } else {
             self.actionButtons.micButton.audioRecorder = nil
diff --git a/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift b/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift
index 3f58a8fa4c..00a8e2e167 100644
--- a/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift
+++ b/submodules/TelegramUI/Sources/LegacyInstantVideoController.swift
@@ -17,9 +17,11 @@ import AppBundle
 
 final class InstantVideoControllerRecordingStatus {
     let micLevel: Signal<Float, NoError>
+    let duration: Signal<TimeInterval, NoError>
     
-    init(micLevel: Signal<Float, NoError>) {
+    init(micLevel: Signal<Float, NoError>, duration: Signal<TimeInterval, NoError>) {
         self.micLevel = micLevel
+        self.duration = duration
     }
 }
 
@@ -30,12 +32,13 @@ final class InstantVideoController: LegacyController, StandalonePresentableContr
     var onStop: (() -> Void)?
     
     private let micLevelValue = ValuePromise<Float>(0.0)
+    private let durationValue = ValuePromise<TimeInterval>(0.0)
     let audioStatus: InstantVideoControllerRecordingStatus
     
     private var dismissedVideo = false
     
     override init(presentation: LegacyControllerPresentation, theme: PresentationTheme?, strings: PresentationStrings? = nil, initialLayout: ContainerViewLayout? = nil) {
-        self.audioStatus = InstantVideoControllerRecordingStatus(micLevel: self.micLevelValue.get())
+        self.audioStatus = InstantVideoControllerRecordingStatus(micLevel: self.micLevelValue.get(), duration: self.durationValue.get())
         
         super.init(presentation: presentation, theme: theme, initialLayout: initialLayout)
         
@@ -52,6 +55,9 @@ final class InstantVideoController: LegacyController, StandalonePresentableContr
         captureController.micLevel = { [weak self] (level: CGFloat) -> Void in
             self?.micLevelValue.set(Float(level))
         }
+        captureController.onDuration = { [weak self] duration in
+            self?.durationValue.set(duration)
+        }
         captureController.onDismiss = { [weak self] _ in
             self?.onDismiss?()
        }
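Note: the duration plumbing in this changeset is a small callback-to-signal bridge: the legacy ObjC capture controller reports elapsed time through the new `onDuration` block, `InstantVideoController` feeds it into a `ValuePromise`, and Swift-side consumers subscribe to the resulting `Signal`. Below is a minimal sketch of a consumer, using only the SwiftSignalKit APIs already visible in this diff (`Signal.start(next:)`, `MetaDisposable`, `Queue.mainQueue()`); `RecordingDurationObserver` is a hypothetical illustration, not part of the change.

```swift
import SwiftSignalKit

// Hypothetical observer of InstantVideoController.audioStatus. It mirrors
// what ChatTextInputAudioRecordingTimeNode does with the new `duration`
// signal: subscribe once, hop to the main queue, and drive UI state from
// the latest value.
final class RecordingDurationObserver {
    private let durationDisposable = MetaDisposable()

    func observe(_ status: InstantVideoControllerRecordingStatus) {
        durationDisposable.set(status.duration.start(next: { duration in
            Queue.mainQueue().async {
                // A real consumer updates a label here; the time node
                // assigns its `timestamp` property instead of printing.
                print("video message duration: \(duration)s")
            }
        }))
    }

    deinit {
        durationDisposable.dispose()
    }
}
```

Bridging the callback into a `Signal` keeps the Swift side declarative: any number of observers (the mic button decoration, the time node) can subscribe without the capture controller knowing about them.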