Video message recording improvements

Mirror of https://github.com/Swiftgram/Telegram-iOS.git, synced 2025-06-15 13:35:19 +00:00
Commit: a3e0c910f7
Parent: d44b531a58
@@ -206,7 +206,7 @@ public final class AnimationNode: ASDisplayNode {
     }
     
     public func preferredSize() -> CGSize? {
-        if let animationView = animationView(), let animation = animationView.animation {
+        if let animationView = self.animationView(), let animation = animationView.animation {
             return CGSize(width: animation.size.width * self.scale, height: animation.size.height * self.scale)
         } else {
             return nil
@@ -604,6 +604,10 @@ private final class CameraContext {
         return self.audioLevelPipe.signal()
     }
     
+    var transitionImage: Signal<UIImage?, NoError> {
+        return .single(self.mainDeviceContext?.output.transitionImage)
+    }
+    
     @objc private func sessionInterruptionEnded(notification: NSNotification) {
     }
     
@@ -969,6 +973,20 @@ public final class Camera {
            }
        }
    }
+    
+    public var transitionImage: Signal<UIImage?, NoError> {
+        return Signal { subscriber in
+            let disposable = MetaDisposable()
+            self.queue.async {
+                if let context = self.contextRef?.takeUnretainedValue() {
+                    disposable.set(context.transitionImage.start(next: { codes in
+                        subscriber.putNext(codes)
+                    }))
+                }
+            }
+            return disposable
+        }
+    }
     
     public enum ModeChange: Equatable {
         case none
         case position
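The two camera hunks above thread the recorder's last transition frame out through the layers: CameraContext wraps CameraOutput's current image in a single-value signal, and the public Camera type forwards it from its private queue by resolving contextRef. A minimal self-contained sketch of that queue-hopping hand-off, using a plain completion handler instead of SwiftSignalKit's Signal and MetaDisposable (the names and types here are illustrative stand-ins, not the project's API):

import UIKit

final class CameraFacade {
    private let queue = DispatchQueue(label: "org.example.camera")
    private var latestTransitionImage: UIImage? // stands in for contextRef -> CameraOutput -> VideoRecorder.transitionImage

    // The recording pipeline would call this on `queue` whenever a new frame is saved.
    func updateTransitionImage(_ image: UIImage?) {
        self.queue.async { self.latestTransitionImage = image }
    }

    // Mirrors the shape of Camera.transitionImage above: hop onto the private
    // queue, read the context-owned value, and deliver it asynchronously.
    func fetchTransitionImage(_ completion: @escaping (UIImage?) -> Void) {
        self.queue.async {
            completion(self.latestTransitionImage)
        }
    }
}

In the real code the same hand-off is expressed as a Signal so the value can be composed with other signals downstream.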
@@ -399,6 +399,10 @@ final class CameraOutput: NSObject {
        }
    }
    
+    var transitionImage: UIImage? {
+        return self.videoRecorder?.transitionImage
+    }
+    
    private weak var masterOutput: CameraOutput?
    func processVideoRecording(_ sampleBuffer: CMSampleBuffer, fromAdditionalOutput: Bool) {
        if let videoRecorder = self.videoRecorder, videoRecorder.isRecording {
@@ -99,6 +99,8 @@ class CameraRoundVideoFilter {
    
    private(set) var isPrepared = false
    
+    let semaphore = DispatchSemaphore(value: 1)
+    
    init(ciContext: CIContext) {
        self.ciContext = ciContext
    }
@@ -141,6 +143,8 @@ class CameraRoundVideoFilter {
    }
    
    func render(pixelBuffer: CVPixelBuffer, mirror: Bool) -> CVPixelBuffer? {
+        self.semaphore.wait()
+        
        guard let resizeFilter = self.resizeFilter, let compositeFilter = self.compositeFilter, self.isPrepared else {
            return nil
        }
@@ -176,6 +180,9 @@ class CameraRoundVideoFilter {
        }
        
        self.ciContext.render(finalImage, to: outputPixelBuffer, bounds: CGRect(origin: .zero, size: CGSize(width: 400, height: 400)), colorSpace: outputColorSpace)
+        
+        self.semaphore.signal()
+        
        return outputPixelBuffer
    }
}
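The three CameraRoundVideoFilter hunks bracket render(pixelBuffer:mirror:) with a DispatchSemaphore so rendering and filter reconfiguration don't race. Note that, as captured here, the early return inside the guard does not appear to signal the semaphore; a defer-based variant (a sketch under that assumption, not the commit's code) releases it on every exit path:

import CoreImage
import CoreVideo
import Foundation

// Illustrative sketch, not the filter class from the diff: the semaphore
// serializes prepare/render, and `defer` guarantees it is released on every
// exit path, including the early return.
final class GuardedRenderer {
    private let semaphore = DispatchSemaphore(value: 1)
    private let ciContext = CIContext()
    private(set) var isPrepared = false

    func prepare() {
        self.semaphore.wait()
        defer { self.semaphore.signal() }
        self.isPrepared = true
    }

    func render(_ image: CIImage, into pixelBuffer: CVPixelBuffer) -> Bool {
        self.semaphore.wait()
        defer { self.semaphore.signal() }

        guard self.isPrepared else {
            return false // released by defer, unlike a bare early return
        }
        self.ciContext.render(image, to: pixelBuffer)
        return true
    }
}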
@@ -35,7 +35,7 @@ private final class VideoRecorderImpl {
    private var audioInput: AVAssetWriterInput?
    
    private let ciContext: CIContext
-    private var transitionImage: UIImage?
+    fileprivate var transitionImage: UIImage?
    private var savedTransitionImage = false
    
    private var pendingAudioSampleBuffers: [CMSampleBuffer] = []
@@ -533,4 +533,8 @@ public final class VideoRecorder {
            }
        }
    }
+    
+    var transitionImage: UIImage? {
+        return self.impl.transitionImage
+    }
}
@@ -1736,7 +1736,7 @@ public final class MediaEditor {
    }
}

-public func videoFrames(asset: AVAsset, count: Int, mirror: Bool = false) -> Signal<([UIImage], Double), NoError> {
+public func videoFrames(asset: AVAsset?, count: Int, initialPlaceholder: UIImage? = nil, initialTimestamp: Double? = nil, mirror: Bool = false) -> Signal<([UIImage], Double), NoError> {
    func blurredImage(_ image: UIImage) -> UIImage? {
        guard let image = image.cgImage else {
            return nil
@@ -1769,55 +1769,82 @@ public func videoFrames(asset: AVAsset, count: Int, mirror: Bool = false) -> Sig
    guard count > 0 else {
        return .complete()
    }
-    let scale = UIScreen.main.scale
-    let imageGenerator = AVAssetImageGenerator(asset: asset)
-    imageGenerator.maximumSize = CGSize(width: 48.0 * scale, height: 36.0 * scale)
-    imageGenerator.appliesPreferredTrackTransform = true
-    imageGenerator.requestedTimeToleranceBefore = .zero
-    imageGenerator.requestedTimeToleranceAfter = .zero
-    
-    var firstFrame: UIImage
-    if let cgImage = try? imageGenerator.copyCGImage(at: .zero, actualTime: nil) {
-        firstFrame = UIImage(cgImage: cgImage)
-        if let blurred = blurredImage(firstFrame) {
-            firstFrame = blurred
-        }
-    } else {
-        firstFrame = generateSingleColorImage(size: CGSize(width: 24.0, height: 36.0), color: .black)!
-    }
-    return Signal { subscriber in
-        subscriber.putNext((Array(repeating: firstFrame, count: count), CACurrentMediaTime()))
-        
-        var timestamps: [NSValue] = []
-        let duration = asset.duration.seconds
-        let interval = duration / Double(count)
-        for i in 0 ..< count {
-            timestamps.append(NSValue(time: CMTime(seconds: Double(i) * interval, preferredTimescale: CMTimeScale(1000))))
-        }
-        
-        var updatedFrames: [UIImage] = []
-        imageGenerator.generateCGImagesAsynchronously(forTimes: timestamps) { _, image, _, _, _ in
-            if let image {
-                updatedFrames.append(UIImage(cgImage: image, scale: 1.0, orientation: mirror ? .upMirrored : .up))
-                if updatedFrames.count == count {
-                    subscriber.putNext((updatedFrames, CACurrentMediaTime()))
-                    subscriber.putCompletion()
-                } else {
-                    var tempFrames = updatedFrames
-                    for _ in 0 ..< count - updatedFrames.count {
-                        tempFrames.append(firstFrame)
-                    }
-                    subscriber.putNext((tempFrames, CACurrentMediaTime()))
-                }
-            } else {
-                if let previous = updatedFrames.last {
-                    updatedFrames.append(previous)
-                }
-            }
-        }
-        
-        return ActionDisposable {
-            imageGenerator.cancelAllCGImageGeneration()
-        }
-    }
+    
+    var imageGenerator: AVAssetImageGenerator?
+    if let asset {
+        let scale = UIScreen.main.scale
+        
+        imageGenerator = AVAssetImageGenerator(asset: asset)
+        imageGenerator?.maximumSize = CGSize(width: 48.0 * scale, height: 36.0 * scale)
+        imageGenerator?.appliesPreferredTrackTransform = true
+        imageGenerator?.requestedTimeToleranceBefore = .zero
+        imageGenerator?.requestedTimeToleranceAfter = .zero
+    }
+    
+    var firstFrame: UIImage
+    if var initialPlaceholder {
+        initialPlaceholder = generateScaledImage(image: initialPlaceholder, size: initialPlaceholder.size.aspectFitted(CGSize(width: 144.0, height: 144.0)), scale: 1.0)!
+        if let blurred = blurredImage(initialPlaceholder) {
+            firstFrame = blurred
+        } else {
+            firstFrame = initialPlaceholder
+        }
+    } else if let imageGenerator {
+        if let cgImage = try? imageGenerator.copyCGImage(at: .zero, actualTime: nil) {
+            firstFrame = UIImage(cgImage: cgImage)
+            if let blurred = blurredImage(firstFrame) {
+                firstFrame = blurred
+            }
+        } else {
+            firstFrame = generateSingleColorImage(size: CGSize(width: 24.0, height: 36.0), color: .black)!
+        }
+    } else {
+        firstFrame = generateSingleColorImage(size: CGSize(width: 24.0, height: 36.0), color: .black)!
+    }
+    
+    if let asset {
+        return Signal { subscriber in
+            subscriber.putNext((Array(repeating: firstFrame, count: count), initialTimestamp ?? CACurrentMediaTime()))
+            
+            var timestamps: [NSValue] = []
+            let duration = asset.duration.seconds
+            let interval = duration / Double(count)
+            for i in 0 ..< count {
+                timestamps.append(NSValue(time: CMTime(seconds: Double(i) * interval, preferredTimescale: CMTimeScale(1000))))
+            }
+            
+            var updatedFrames: [UIImage] = []
+            imageGenerator?.generateCGImagesAsynchronously(forTimes: timestamps) { _, image, _, _, _ in
+                if let image {
+                    updatedFrames.append(UIImage(cgImage: image, scale: 1.0, orientation: mirror ? .upMirrored : .up))
+                    if updatedFrames.count == count {
+                        subscriber.putNext((updatedFrames, CACurrentMediaTime()))
+                        subscriber.putCompletion()
+                    } else {
+                        var tempFrames = updatedFrames
+                        for _ in 0 ..< count - updatedFrames.count {
+                            tempFrames.append(firstFrame)
+                        }
+                        subscriber.putNext((tempFrames, CACurrentMediaTime()))
+                    }
+                } else {
+                    if let previous = updatedFrames.last {
+                        updatedFrames.append(previous)
+                    }
+                }
+            }
+            
+            return ActionDisposable {
+                imageGenerator?.cancelAllCGImageGeneration()
+            }
+        }
+    } else {
+        var frames: [UIImage] = []
+        for _ in 0 ..< count {
+            frames.append(firstFrame)
+        }
+        return .single((frames, CACurrentMediaTime()))
+    }
}
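The rewritten videoFrames accepts an optional asset plus an optional placeholder image: without an asset it immediately returns count copies of the scaled, blurred placeholder, and with an asset it still emits the placeholder run first and then swaps in real frames from AVAssetImageGenerator as they arrive. A rough self-contained sketch of that "placeholder first, real frames later" shape, simplified to a callback instead of a Signal (the function and parameter names here are illustrative, not the project's):

import AVFoundation
import UIKit

// Emits a run of placeholder frames immediately, then the real frames once
// AVAssetImageGenerator has produced them.
func thumbnailFrames(
    asset: AVAsset?,
    count: Int,
    placeholder: UIImage,
    update: @escaping ([UIImage]) -> Void
) {
    // Immediate pass: the scrubber can lay itself out before decoding starts.
    update(Array(repeating: placeholder, count: count))

    guard let asset, count > 0 else {
        return
    }
    let generator = AVAssetImageGenerator(asset: asset)
    generator.appliesPreferredTrackTransform = true

    let duration = asset.duration.seconds
    let interval = duration / Double(count)
    let times = (0 ..< count).map {
        NSValue(time: CMTime(seconds: Double($0) * interval, preferredTimescale: 1000))
    }

    var frames: [UIImage] = []
    generator.generateCGImagesAsynchronously(forTimes: times) { _, cgImage, _, _, _ in
        if let cgImage {
            frames.append(UIImage(cgImage: cgImage))
        } else {
            frames.append(placeholder)
        }
        if frames.count == count {
            update(frames)
        }
    }
}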
@@ -1361,6 +1361,7 @@ final class MediaEditorScreenComponent: Component {
                        component: AnyComponent(MediaScrubberComponent(
                            context: component.context,
                            style: .editor,
+                            theme: environment.theme,
                            generationTimestamp: playerState.generationTimestamp,
                            position: playerState.position,
                            minDuration: minDuration,
@@ -74,6 +74,7 @@ public final class MediaScrubberComponent: Component {
    
    let context: AccountContext
    let style: Style
+    let theme: PresentationTheme
    
    let generationTimestamp: Double
    
@@ -92,6 +93,7 @@ public final class MediaScrubberComponent: Component {
    public init(
        context: AccountContext,
        style: Style,
+        theme: PresentationTheme,
        generationTimestamp: Double,
        position: Double,
        minDuration: Double,
@@ -105,6 +107,7 @@ public final class MediaScrubberComponent: Component {
    ) {
        self.context = context
        self.style = style
+        self.theme = theme
        self.generationTimestamp = generationTimestamp
        self.position = position
        self.minDuration = minDuration
@@ -121,6 +124,9 @@ public final class MediaScrubberComponent: Component {
        if lhs.context !== rhs.context {
            return false
        }
+        if lhs.theme !== rhs.theme {
+            return false
+        }
        if lhs.generationTimestamp != rhs.generationTimestamp {
            return false
        }
@@ -524,6 +530,7 @@ public final class MediaScrubberComponent: Component {
            self.trimView.isHollow = self.selectedTrackId != lowestVideoId || self.isAudioOnly
            let (leftHandleFrame, rightHandleFrame) = self.trimView.update(
                style: component.style,
+                theme: component.theme,
                visualInsets: trimViewVisualInsets,
                scrubberSize: CGSize(width: trackViewWidth, height: fullTrackHeight),
                duration: mainTrimDuration,
@@ -537,6 +544,7 @@ public final class MediaScrubberComponent: Component {
            
            let (ghostLeftHandleFrame, ghostRightHandleFrame) = self.ghostTrimView.update(
                style: component.style,
+                theme: component.theme,
                visualInsets: .zero,
                scrubberSize: CGSize(width: scrubberSize.width, height: collapsedTrackHeight),
                duration: self.duration,
@@ -1300,6 +1308,7 @@ private class TrimView: UIView {
    
    func update(
        style: MediaScrubberComponent.Style,
+        theme: PresentationTheme,
        visualInsets: UIEdgeInsets,
        scrubberSize: CGSize,
        duration: Double,
@@ -1359,8 +1368,8 @@ private class TrimView: UIView {
            effectiveHandleWidth = 16.0
            fullTrackHeight = 33.0
            capsuleOffset = 8.0
-            color = UIColor(rgb: 0x3478f6)
-            highlightColor = UIColor(rgb: 0x3478f6)
+            color = theme.chat.inputPanel.panelControlAccentColor
+            highlightColor = theme.chat.inputPanel.panelControlAccentColor
            
            if isFirstTime {
                let handleImage = generateImage(CGSize(width: effectiveHandleWidth, height: fullTrackHeight), rotatedContext: { size, context in
@@ -173,7 +173,10 @@ private final class CameraScreenComponent: CombinedComponent {
            super.init()
            
            self.startRecording.connect({ [weak self] _ in
-                self?.startVideoRecording(pressing: true)
+                if let self, let controller = getController() {
+                    self.startVideoRecording(pressing: !controller.scheduledLock)
+                    controller.scheduledLock = false
+                }
            })
            self.stopRecording.connect({ [weak self] _ in
                self?.stopVideoRecording()
@@ -508,6 +511,8 @@ public class VideoMessageCameraScreen: ViewController {
        }
        var previewStatePromise = Promise<PreviewState?>()
        
+        var transitioningToPreview = false
+        
        init(controller: VideoMessageCameraScreen) {
            self.controller = controller
            self.context = controller.context
@@ -736,16 +741,12 @@
            self.results.append(result)
            self.resultsPipe.putNext(result)
            
-            let composition = composition(with: self.results)
-            controller.updatePreviewState({ _ in
-                return PreviewState(composition: composition, trimRange: nil)
-            }, transition: .spring(duration: 0.4))
-            
+            self.transitioningToPreview = false
+            
//            #if DEBUG
//            if case let .video(video) = result {
//                self.debugSaveResult(path: video.videoPath)
//            }
//            #endif
        }
        
        private func debugSaveResult(path: String) {
@@ -895,7 +896,7 @@ public class VideoMessageCameraScreen: ViewController {
                CameraScreenComponent(
                    context: self.context,
                    cameraState: self.cameraState,
-                    isPreviewing: self.previewState != nil,
+                    isPreviewing: self.previewState != nil || self.transitioningToPreview,
                    getController: { [weak self] in
                        return self?.controller
                    },
|
||||
|
||||
public func takenRecordedData() -> Signal<RecordedVideoData?, NoError> {
|
||||
let previewState = self.node.previewStatePromise.get()
|
||||
return self.currentResults
|
||||
|> take(1)
|
||||
|> mapToSignal { results in
|
||||
var totalDuration: Double = 0.0
|
||||
for result in results {
|
||||
if case let .video(video) = result {
|
||||
totalDuration += video.duration
|
||||
}
|
||||
let count = 12
|
||||
|
||||
let initialPlaceholder: Signal<UIImage?, NoError>
|
||||
if let firstResult = self.node.results.first {
|
||||
if case let .video(video) = firstResult {
|
||||
initialPlaceholder = .single(video.thumbnail)
|
||||
} else {
|
||||
initialPlaceholder = .single(nil)
|
||||
}
|
||||
let composition = composition(with: results)
|
||||
return combineLatest(
|
||||
queue: Queue.mainQueue(),
|
||||
videoFrames(asset: composition, count: 12),
|
||||
previewState
|
||||
)
|
||||
|> map { framesAndUpdateTimestamp, previewState in
|
||||
} else {
|
||||
initialPlaceholder = self.camera?.transitionImage ?? .single(nil)
|
||||
}
|
||||
|
||||
let immediateResult: Signal<RecordedVideoData?, NoError> = initialPlaceholder
|
||||
|> take(1)
|
||||
|> mapToSignal { initialPlaceholder in
|
||||
return videoFrames(asset: nil, count: count, initialPlaceholder: initialPlaceholder)
|
||||
|> map { framesAndUpdateTimestamp in
|
||||
return RecordedVideoData(
|
||||
duration: totalDuration,
|
||||
duration: 1.0,
|
||||
frames: framesAndUpdateTimestamp.0,
|
||||
framesUpdateTimestamp: framesAndUpdateTimestamp.1,
|
||||
trimRange: previewState?.trimRange
|
||||
trimRange: nil
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
return immediateResult
|
||||
|> mapToSignal { immediateResult in
|
||||
return .single(immediateResult)
|
||||
|> then(
|
||||
self.currentResults
|
||||
|> take(1)
|
||||
|> mapToSignal { results in
|
||||
var totalDuration: Double = 0.0
|
||||
for result in results {
|
||||
if case let .video(video) = result {
|
||||
totalDuration += video.duration
|
||||
}
|
||||
}
|
||||
let composition = composition(with: results)
|
||||
return combineLatest(
|
||||
queue: Queue.mainQueue(),
|
||||
videoFrames(asset: composition, count: count, initialTimestamp: immediateResult?.framesUpdateTimestamp),
|
||||
previewState
|
||||
)
|
||||
|> map { framesAndUpdateTimestamp, previewState in
|
||||
return RecordedVideoData(
|
||||
duration: totalDuration,
|
||||
frames: framesAndUpdateTimestamp.0,
|
||||
framesUpdateTimestamp: framesAndUpdateTimestamp.1,
|
||||
trimRange: previewState?.trimRange
|
||||
)
|
||||
}
|
||||
}
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
public init(
|
||||
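takenRecordedData now delivers its result in two stages: an immediate RecordedVideoData built from a cheap placeholder (the first result's thumbnail, or the camera's transitionImage when nothing has been recorded yet), followed via then(...) by the full data computed from the composed recording. A simplified sketch of that "quick approximation, then real value" shape, using an AsyncStream instead of SwiftSignalKit (the type and function names here are illustrative):

import UIKit

struct RecordedPreview {
    var duration: Double
    var frames: [UIImage]
}

// Two-stage delivery: a cheap placeholder value first, the real value once it
// is ready, mirroring .single(immediateResult) |> then(fullResult) above.
func recordedPreviewUpdates(
    placeholderFrame: UIImage,
    frameCount: Int,
    loadFullPreview: @escaping () async -> RecordedPreview
) -> AsyncStream<RecordedPreview> {
    AsyncStream { continuation in
        // Stage 1: placeholder frames, so the preview UI can appear instantly.
        continuation.yield(RecordedPreview(
            duration: 1.0,
            frames: Array(repeating: placeholderFrame, count: frameCount)
        ))
        // Stage 2: the real frames and duration from the composed recording.
        Task {
            continuation.yield(await loadFullPreview())
            continuation.finish()
        }
    }
}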
@@ -1219,13 +1253,21 @@
    private var waitingForNextResult = false
    public func stopVideoRecording() -> Bool {
        self.waitingForNextResult = true
+        self.node.transitioningToPreview = true
+        self.node.requestUpdateLayout(transition: .spring(duration: 0.4))
        
        self.node.stopRecording.invoke(Void())
        
        return true
    }
    
+    fileprivate var scheduledLock = false
    public func lockVideoRecording() {
-        self.updateCameraState({ $0.updatedRecording(.handsFree) }, transition: .spring(duration: 0.4))
+        if case .none = self.cameraState.recording {
+            self.scheduledLock = true
+        } else {
+            self.updateCameraState({ $0.updatedRecording(.handsFree) }, transition: .spring(duration: 0.4))
+        }
+        
        self.node.maybePresentViewOnceTooltip()
    }
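lockVideoRecording now has to cope with the lock gesture arriving before recording has actually started: instead of switching to hands-free immediately, it records the intent in scheduledLock, and the startRecording handler shown earlier consumes that flag (pressing: !controller.scheduledLock), so the recording presumably begins already locked. A minimal state sketch of that deferral (illustrative names, not the controller's real API):

// Defers a "lock" request that arrives before recording has begun,
// mirroring the scheduledLock flag in the diff.
final class RecordingLockState {
    enum Recording { case none, holding, handsFree }

    private(set) var recording: Recording = .none
    private var scheduledLock = false

    func lockRequested() {
        if case .none = recording {
            // Too early to lock: remember the request for when recording starts.
            scheduledLock = true
        } else {
            recording = .handsFree
        }
    }

    func startRecording() {
        // A pending lock means recording starts directly in hands-free mode.
        recording = scheduledLock ? .handsFree : .holding
        scheduledLock = false
    }
}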
@@ -304,6 +304,7 @@ final class ChatRecordingPreviewInputPanelNode: ChatInputPanelNode {
                MediaScrubberComponent(
                    context: context,
                    style: .videoMessage,
+                    theme: interfaceState.theme,
                    generationTimestamp: 0,
                    position: 0,
                    minDuration: 1.0,
@@ -2094,7 +2094,6 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                    size: audioRecordingCancelIndicator.bounds.size)
                audioRecordingCancelIndicator.frame = audioRecordingCancelIndicatorFrame
                if self.actionButtons.micButton.cancelTranslation > cancelTransformThreshold {
-                    //let progress = 1 - (self.actionButtons.micButton.cancelTranslation - cancelTransformThreshold) / 80
                    let progress: CGFloat = max(0.0, min(1.0, (audioRecordingCancelIndicatorFrame.minX - 100.0) / 10.0))
                    audioRecordingCancelIndicator.alpha = progress
                } else {
|
||||
audioRecordingTimeNode.layer.animateAlpha(from: 0, to: 1, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring)
|
||||
}
|
||||
|
||||
let dotFrame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: audioRecordingTimeNode.frame.midY - 20), size: CGSize(width: 40.0, height: 40))
|
||||
|
||||
var animateDotAppearing = false
|
||||
let audioRecordingDotNode: AnimationNode
|
||||
if let currentAudioRecordingDotNode = self.audioRecordingDotNode, !currentAudioRecordingDotNode.didPlay {
|
||||
@@ -2152,9 +2153,12 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                } else {
                    self.audioRecordingDotNode?.removeFromSupernode()
                    audioRecordingDotNode = AnimationNode(animation: "BinRed")
+                    
                    self.audioRecordingDotNode = audioRecordingDotNode
                    self.audioRecordingDotNodeDismissed = false
                    self.clippingNode.insertSubnode(audioRecordingDotNode, belowSubnode: self.menuButton)
+                    audioRecordingDotNode.frame = dotFrame
+                    
                    self.animatingBinNode?.removeFromSupernode()
                    self.animatingBinNode = nil
                }
@@ -2163,10 +2167,14 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate, Ch
                if let mediaRecordingState = mediaRecordingState, case .waitingForPreview = mediaRecordingState {
                    animateDotAppearing = false
                }
                
-                audioRecordingDotNode.frame = CGRect(origin: CGPoint(x: leftInset + 2.0 - UIScreenPixel, y: audioRecordingTimeNode.frame.midY - 20), size: CGSize(width: 40.0, height: 40))
+                audioRecordingDotNode.bounds = CGRect(origin: .zero, size: dotFrame.size)
+                audioRecordingDotNode.position = dotFrame.center
+                
                if animateDotAppearing || animateCancelSlideIn {
-                    audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
                    if animateDotAppearing {
+                        Queue.mainQueue().justDispatch {
+                            audioRecordingDotNode.layer.animateScale(from: 0.3, to: 1, duration: 0.15, delay: 0, removeOnCompletion: false)
+                        }
                        audioRecordingTimeNode.started = { [weak audioRecordingDotNode] in
                            if let audioRecordingDotNode = audioRecordingDotNode, audioRecordingDotNode.layer.animation(forKey: "recording") == nil {
                                audioRecordingDotNode.layer.animateAlpha(from: CGFloat(audioRecordingDotNode.layer.presentation()?.opacity ?? 0), to: 1, duration: 0.15, delay: 0, completion: { [weak audioRecordingDotNode] finished in
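The last hunk positions the recording dot by setting bounds and position from the precomputed dotFrame instead of assigning frame, and defers the scale animation with Queue.mainQueue().justDispatch. Setting bounds plus position is the usual way to place a view whose layer may be transform-animated, since frame is derived from the transform; a tiny illustrative helper (assumed name, not the panel's code):

import UIKit

// Position a view via bounds + center, which stays well-defined even while
// its layer is being scale-animated, unlike assigning frame directly.
func place(_ view: UIView, in frame: CGRect) {
    view.bounds = CGRect(origin: .zero, size: frame.size)
    view.center = CGPoint(x: frame.midX, y: frame.midY)
}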