Camera and editor improvements

parent 359653260c
commit ee3e2b540a
@ -796,6 +796,9 @@ public struct StoryCameraTransitionInCoordinator {
public protocol TelegramRootControllerInterface: NavigationController {
@discardableResult
func openStoryCamera(transitionIn: StoryCameraTransitionIn?, transitionedIn: @escaping () -> Void, transitionOut: @escaping (Bool) -> StoryCameraTransitionOut?) -> StoryCameraTransitionInCoordinator?

func getContactsController() -> ViewController?
func getChatsController() -> ViewController?
}

public protocol SharedAccountContext: AnyObject {
@ -163,7 +163,7 @@ private final class CameraContext {
|
||||
self.simplePreviewView = previewView
|
||||
self.secondaryPreviewView = secondaryPreviewView
|
||||
|
||||
self.dualPosition = configuration.position
|
||||
self.positionValue = configuration.position
|
||||
|
||||
self.mainDeviceContext = CameraDeviceContext(session: session, exclusive: true, additional: false)
|
||||
self.configure {
|
||||
@ -250,16 +250,16 @@ private final class CameraContext {
|
||||
return self._positionPromise.get()
|
||||
}
|
||||
|
||||
private var dualPosition: Camera.Position = .back
|
||||
private var positionValue: Camera.Position = .back
|
||||
func togglePosition() {
|
||||
if self.isDualCamEnabled {
|
||||
let targetPosition: Camera.Position
|
||||
if case .back = self.dualPosition {
|
||||
if case .back = self.positionValue {
|
||||
targetPosition = .front
|
||||
} else {
|
||||
targetPosition = .back
|
||||
}
|
||||
self.dualPosition = targetPosition
|
||||
self.positionValue = targetPosition
|
||||
self._positionPromise.set(targetPosition)
|
||||
|
||||
self.mainDeviceContext.output.markPositionChange(position: targetPosition)
|
||||
@ -273,7 +273,7 @@ private final class CameraContext {
|
||||
} else {
|
||||
targetPosition = .back
|
||||
}
|
||||
self.dualPosition = targetPosition
|
||||
self.positionValue = targetPosition
|
||||
self._positionPromise.set(targetPosition)
|
||||
self.modeChange = .position
|
||||
|
||||
@ -291,7 +291,7 @@ private final class CameraContext {
|
||||
self.mainDeviceContext.invalidate()
|
||||
|
||||
self._positionPromise.set(position)
|
||||
self.dualPosition = position
|
||||
self.positionValue = position
|
||||
self.modeChange = .position
|
||||
|
||||
self.mainDeviceContext.configure(position: position, previewView: self.simplePreviewView, audio: self.initialConfiguration.audio, photo: self.initialConfiguration.photo, metadata: self.initialConfiguration.metadata)
|
||||
@ -356,7 +356,7 @@ private final class CameraContext {
|
||||
self.additionalDeviceContext = nil
|
||||
|
||||
self.mainDeviceContext = CameraDeviceContext(session: self.session, exclusive: true, additional: false)
|
||||
self.mainDeviceContext.configure(position: self.dualPosition, previewView: self.simplePreviewView, audio: self.initialConfiguration.audio, photo: self.initialConfiguration.photo, metadata: self.initialConfiguration.metadata)
|
||||
self.mainDeviceContext.configure(position: self.positionValue, previewView: self.simplePreviewView, audio: self.initialConfiguration.audio, photo: self.initialConfiguration.photo, metadata: self.initialConfiguration.metadata)
|
||||
}
|
||||
self.mainDeviceContext.output.processSampleBuffer = { [weak self] sampleBuffer, pixelBuffer, connection in
|
||||
guard let self else {
|
||||
@ -446,7 +446,7 @@ private final class CameraContext {
|
||||
func takePhoto() -> Signal<PhotoCaptureResult, NoError> {
|
||||
let orientation = self.videoOrientation ?? .portrait
|
||||
if let additionalDeviceContext = self.additionalDeviceContext {
|
||||
let dualPosition = self.dualPosition
|
||||
let dualPosition = self.positionValue
|
||||
return combineLatest(
|
||||
self.mainDeviceContext.output.takePhoto(orientation: orientation, flashMode: self._flashMode),
|
||||
additionalDeviceContext.output.takePhoto(orientation: orientation, flashMode: self._flashMode)
|
||||
@ -469,13 +469,13 @@ private final class CameraContext {
public func startRecording() -> Signal<Double, NoError> {
if let additionalDeviceContext = self.additionalDeviceContext {
return combineLatest(
self.mainDeviceContext.output.startRecording(),
additionalDeviceContext.output.startRecording()
self.mainDeviceContext.output.startRecording(isDualCamera: true, position: self.positionValue),
additionalDeviceContext.output.startRecording(isDualCamera: true)
) |> map { value, _ in
return value
}
} else {
return self.mainDeviceContext.output.startRecording()
return self.mainDeviceContext.output.startRecording(isDualCamera: false)
}
}
@ -486,13 +486,29 @@ private final class CameraContext {
additionalDeviceContext.output.stopRecording()
) |> mapToSignal { main, additional in
if case let .finished(mainResult, _, duration, positionChangeTimestamps, _) = main, case let .finished(additionalResult, _, _, _, _) = additional {
return .single(.finished(mainResult, additionalResult, duration, positionChangeTimestamps, CACurrentMediaTime()))
var additionalTransitionImage = additionalResult.1
if let cgImage = additionalResult.1.cgImage {
additionalTransitionImage = UIImage(cgImage: cgImage, scale: 1.0, orientation: .leftMirrored)
}
return .single(.finished(mainResult, (additionalResult.0, additionalTransitionImage, true), duration, positionChangeTimestamps, CACurrentMediaTime()))
} else {
return .complete()
}
}
} else {
let mirror = self.positionValue == .front
return self.mainDeviceContext.output.stopRecording()
|> map { result -> VideoCaptureResult in
if case let .finished(mainResult, _, duration, positionChangeTimestamps, time) = result {
var transitionImage = mainResult.1
if mirror, let cgImage = transitionImage.cgImage {
transitionImage = UIImage(cgImage: cgImage, scale: 1.0, orientation: .leftMirrored)
}
return .finished((mainResult.0, transitionImage, mirror), nil, duration, positionChangeTimestamps, time)
} else {
return result
}
}
}
}
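Note: in the dual-camera branch above, the additional (front) camera's transition image is re-wrapped with a .leftMirrored orientation and the result tuple gains a mirror flag; the single-camera branch applies the same treatment when the active camera is the front one. A minimal sketch of that step, written as a free function purely for illustration (the name is not from this commit):

    import UIKit

    // Only the orientation metadata changes; the pixel data is untouched.
    // UIKit then draws the frame mirrored, matching what the user saw in the front-camera preview.
    func mirroredTransitionImage(_ image: UIImage) -> UIImage {
        guard let cgImage = image.cgImage else {
            return image
        }
        return UIImage(cgImage: cgImage, scale: 1.0, orientation: .leftMirrored)
    }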
@ -7,7 +7,7 @@ import Vision
import VideoToolbox

public enum VideoCaptureResult: Equatable {
case finished((String, UIImage), (String, UIImage)?, Double, [(Bool, Double)], Double)
case finished((String, UIImage, Bool), (String, UIImage, Bool)?, Double, [(Bool, Double)], Double)
case failed

public static func == (lhs: VideoCaptureResult, rhs: VideoCaptureResult) -> Bool {
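Note: the finished payload grows from (path, transitionImage) to (path, transitionImage, mirrored) for both the main and the additional recording. A hedged sketch of how a caller might destructure the new tuples (names are illustrative, not from the commit):

    // Assuming `result` is a VideoCaptureResult delivered by stopRecording().
    func handle(_ result: VideoCaptureResult) {
        switch result {
        case let .finished(main, additional, duration, positionChanges, _):
            let (videoPath, coverImage, isMirrored) = main
            print("main:", videoPath, coverImage.size, "mirrored:", isMirrored, "duration:", duration)
            if let (additionalPath, _, additionalMirrored) = additional {
                print("dual:", additionalPath, "mirrored:", additionalMirrored)
            }
            print("camera flips:", positionChanges) // [(isFrontCamera, secondsFromStart)]
        case .failed:
            print("recording failed")
        }
    }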
@ -88,7 +88,6 @@ final class CameraOutput: NSObject {
|
||||
|
||||
private var photoCaptureRequests: [Int64: PhotoCaptureContext] = [:]
|
||||
private var videoRecorder: VideoRecorder?
|
||||
weak var overrideOutput: CameraOutput?
|
||||
|
||||
var activeFilter: CameraFilter?
|
||||
var faceLandmarks: Bool = false
|
||||
@ -316,7 +315,7 @@ final class CameraOutput: NSObject {
|
||||
}
|
||||
|
||||
private var recordingCompletionPipe = ValuePipe<VideoCaptureResult>()
|
||||
func startRecording() -> Signal<Double, NoError> {
|
||||
func startRecording(isDualCamera: Bool, position: Camera.Position? = nil) -> Signal<Double, NoError> {
|
||||
guard self.videoRecorder == nil else {
|
||||
return .complete()
|
||||
}
|
||||
@ -338,7 +337,7 @@ final class CameraOutput: NSObject {
|
||||
let outputFileURL = URL(fileURLWithPath: outputFilePath)
|
||||
let videoRecorder = VideoRecorder(configuration: VideoRecorder.Configuration(videoSettings: videoSettings, audioSettings: audioSettings), videoTransform: CGAffineTransform(rotationAngle: .pi / 2.0), fileUrl: outputFileURL, completion: { [weak self] result in
|
||||
if case let .success(transitionImage, duration, positionChangeTimestamps) = result {
|
||||
self?.recordingCompletionPipe.putNext(.finished((outputFilePath, transitionImage!), nil, duration, positionChangeTimestamps.map { ($0 == .front, $1) }, CACurrentMediaTime()))
|
||||
self?.recordingCompletionPipe.putNext(.finished((outputFilePath, transitionImage!, false), nil, duration, positionChangeTimestamps.map { ($0 == .front, $1) }, CACurrentMediaTime()))
|
||||
} else {
|
||||
self?.recordingCompletionPipe.putNext(.failed)
|
||||
}
|
||||
@ -347,6 +346,10 @@ final class CameraOutput: NSObject {
|
||||
videoRecorder?.start()
|
||||
self.videoRecorder = videoRecorder
|
||||
|
||||
if isDualCamera, let position {
|
||||
videoRecorder?.markPositionChange(position: position, time: .zero)
|
||||
}
|
||||
|
||||
return Signal { subscriber in
|
||||
let timer = SwiftSignalKit.Timer(timeout: 0.1, repeat: true, completion: { [weak videoRecorder] in
|
||||
subscriber.putNext(videoRecorder?.duration ?? 0.0)
|
||||
|
@ -86,14 +86,18 @@ private final class VideoRecorderImpl {
}
}

public func markPositionChange(position: Camera.Position) {
public func markPositionChange(position: Camera.Position, time: CMTime? = nil) {
self.queue.async {
guard self.recordingStartSampleTime.isValid else {
guard self.recordingStartSampleTime.isValid || time != nil else {
return
}
let currentTime = CMTime(seconds: CACurrentMediaTime(), preferredTimescale: CMTimeScale(NSEC_PER_SEC))
let delta = currentTime - self.recordingStartSampleTime
self.positionChangeTimestamps.append((position, delta))
if let time {
self.positionChangeTimestamps.append((position, time))
} else {
let currentTime = CMTime(seconds: CACurrentMediaTime(), preferredTimescale: CMTimeScale(NSEC_PER_SEC))
let delta = currentTime - self.recordingStartSampleTime
self.positionChangeTimestamps.append((position, delta))
}
}
}
@ -486,9 +490,9 @@ public final class VideoRecorder {
func stop() {
self.impl.stopRecording()
}

func markPositionChange(position: Camera.Position) {
self.impl.markPositionChange(position: position)
func markPositionChange(position: Camera.Position, time: CMTime? = nil) {
self.impl.markPositionChange(position: position, time: time)
}

func appendSampleBuffer(_ sampleBuffer: CMSampleBuffer) {
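Note: the optional time parameter lets CameraOutput stamp the initial dual-camera position at .zero as soon as recording starts, before any sample has arrived (the old guard on recordingStartSampleTime would have dropped that first entry). The recorder's per-position entries are later flattened into the [(Bool, Double)] list carried by VideoCaptureResult; a sketch of that mapping, with the element type written out for illustration:

    import CoreMedia

    // Illustrative flattening: true means the front camera was active from that offset.
    // The exact stored type inside the recorder may differ; CMTime is assumed here.
    func flattenPositionChanges(_ changes: [(Camera.Position, CMTime)]) -> [(Bool, Double)] {
        return changes.map { position, time in
            (position == .front, time.seconds)
        }
    }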
@ -2680,8 +2680,20 @@ public class ChatListControllerImpl: TelegramBaseController, ChatListController
|
||||
return
|
||||
}
|
||||
self.context.engine.peers.updatePeerStoriesHidden(id: peer.id, isHidden: true)
|
||||
|
||||
guard let parentController = self.parent as? TabBarController, let contactsController = (self.navigationController as? TelegramRootControllerInterface)?.getContactsController(), let sourceFrame = parentController.frameForControllerTab(controller: contactsController) else {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
let location = CGRect(origin: CGPoint(x: sourceFrame.midX, y: sourceFrame.minY - 8.0), size: CGSize())
|
||||
let tooltipController = TooltipScreen(
|
||||
account: self.context.account,
|
||||
sharedContext: self.context.sharedContext,
|
||||
text: "Stories from \(peer.compactDisplayTitle) will now be shown in Contacts, not Chats.",
|
||||
location: .point(location, .bottom),
|
||||
shouldDismissOnTouch: { _ in return .dismiss(consume: false) }
|
||||
)
|
||||
self.present(tooltipController, in: .window(.root))
|
||||
})))
|
||||
}
|
||||
|
||||
|
@ -21,13 +21,30 @@ public final class DrawingMediaEntityView: DrawingEntityView, DrawingEntityMedia
|
||||
if let previewView = self.previewView {
|
||||
previewView.isUserInteractionEnabled = false
|
||||
previewView.layer.allowsEdgeAntialiasing = true
|
||||
self.addSubview(previewView)
|
||||
if self.additionalView == nil {
|
||||
self.addSubview(previewView)
|
||||
}
|
||||
} else {
|
||||
oldValue?.removeFromSuperview()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
public var additionalView: DrawingStickerEntityView.VideoView? {
|
||||
didSet {
|
||||
if let additionalView = self.additionalView {
|
||||
self.addSubview(additionalView)
|
||||
} else {
|
||||
if let previous = oldValue, previous.superview === self {
|
||||
previous.removeFromSuperview()
|
||||
}
|
||||
if let previewView = self.previewView {
|
||||
self.addSubview(previewView)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private let snapTool = DrawingEntitySnapTool()
|
||||
|
||||
init(context: AccountContext, entity: DrawingMediaEntity) {
|
||||
@ -87,10 +104,17 @@ public final class DrawingMediaEntityView: DrawingEntityView, DrawingEntityMedia
|
||||
|
||||
if size.width > 0 && self.currentSize != size {
|
||||
self.currentSize = size
|
||||
self.previewView?.frame = CGRect(origin: .zero, size: size)
|
||||
|
||||
if self.previewView?.superview === self {
|
||||
self.previewView?.frame = CGRect(origin: .zero, size: size)
|
||||
}
|
||||
if let additionalView = self.additionalView, additionalView.superview === self {
|
||||
additionalView.frame = CGRect(origin: .zero, size: size)
|
||||
}
|
||||
self.update(animated: false)
|
||||
}
|
||||
if let additionalView = self.additionalView, additionalView.superview === self {
|
||||
self.additionalView?.frame = self.bounds
|
||||
}
|
||||
}
|
||||
|
||||
public var updated: (() -> Void)?
|
||||
@ -103,8 +127,10 @@ public final class DrawingMediaEntityView: DrawingEntityView, DrawingEntityMedia
|
||||
self.bounds = CGRect(origin: .zero, size: size)
|
||||
self.transform = CGAffineTransformScale(CGAffineTransformMakeRotation(self.mediaEntity.rotation), scale, scale)
|
||||
|
||||
self.previewView?.layer.transform = CATransform3DMakeScale(self.mediaEntity.mirrored ? -1.0 : 1.0, 1.0, 1.0)
|
||||
self.previewView?.frame = self.bounds
|
||||
if self.previewView?.superview === self {
|
||||
self.previewView?.layer.transform = CATransform3DMakeScale(self.mediaEntity.mirrored ? -1.0 : 1.0, 1.0, 1.0)
|
||||
self.previewView?.frame = self.bounds
|
||||
}
|
||||
|
||||
super.update(animated: animated)
|
||||
|
||||
|
@ -3021,6 +3021,16 @@ public final class DrawingToolsInteraction {
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
var isVideo = false
|
||||
if let entity = entityView.entity as? DrawingStickerEntity {
|
||||
if case .video = entity.content {
|
||||
isVideo = true
|
||||
} else if case .dualVideoReference = entity.content {
|
||||
isVideo = true
|
||||
}
|
||||
}
|
||||
|
||||
let presentationData = self.context.sharedContext.currentPresentationData.with { $0 }.withUpdated(theme: defaultDarkPresentationTheme)
|
||||
var actions: [ContextMenuAction] = []
|
||||
actions.append(ContextMenuAction(content: .text(title: presentationData.strings.Paint_Delete, accessibilityLabel: presentationData.strings.Paint_Delete), action: { [weak self, weak entityView] in
|
||||
@ -3042,19 +3052,21 @@ public final class DrawingToolsInteraction {
|
||||
}
|
||||
}))
|
||||
}
|
||||
if !isTopmost {
|
||||
if !isTopmost && !isVideo {
|
||||
actions.append(ContextMenuAction(content: .text(title: presentationData.strings.Paint_MoveForward, accessibilityLabel: presentationData.strings.Paint_MoveForward), action: { [weak self, weak entityView] in
|
||||
if let self, let entityView {
|
||||
self.entitiesView.bringToFront(uuid: entityView.entity.uuid)
|
||||
}
|
||||
}))
|
||||
}
|
||||
actions.append(ContextMenuAction(content: .text(title: presentationData.strings.Paint_Duplicate, accessibilityLabel: presentationData.strings.Paint_Duplicate), action: { [weak self, weak entityView] in
|
||||
if let self, let entityView {
|
||||
let newEntity = self.entitiesView.duplicate(entityView.entity)
|
||||
self.entitiesView.selectEntity(newEntity)
|
||||
}
|
||||
}))
|
||||
if !isVideo {
|
||||
actions.append(ContextMenuAction(content: .text(title: presentationData.strings.Paint_Duplicate, accessibilityLabel: presentationData.strings.Paint_Duplicate), action: { [weak self, weak entityView] in
|
||||
if let self, let entityView {
|
||||
let newEntity = self.entitiesView.duplicate(entityView.entity)
|
||||
self.entitiesView.selectEntity(newEntity)
|
||||
}
|
||||
}))
|
||||
}
|
||||
let entityFrame = entityView.convert(entityView.selectionBounds, to: node.view).offsetBy(dx: 0.0, dy: -6.0)
|
||||
let controller = ContextMenuController(actions: actions)
|
||||
let bounds = node.bounds.insetBy(dx: 0.0, dy: 160.0)
|
||||
|
@ -10,22 +10,63 @@ import StickerResources
import AccountContext
import MediaEditor

final class DrawingStickerEntityView: DrawingEntityView {
public final class DrawingStickerEntityView: DrawingEntityView {
public class VideoView: UIView {
init(player: AVPlayer) {
super.init(frame: .zero)

self.videoLayer.player = player
}

required init?(coder: NSCoder) {
fatalError("init(coder:) has not been implemented")
}

var videoLayer: AVPlayerLayer {
guard let layer = self.layer as? AVPlayerLayer else {
fatalError()
}
return layer
}

public override class var layerClass: AnyClass {
return AVPlayerLayer.self
}
}
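Note: VideoView overrides layerClass, so the view's own backing layer is an AVPlayerLayer rather than a sublayer; frames, transforms and corner radii applied to the view therefore act directly on the video. A hedged usage sketch from inside the same module (the videoGravity assignment is an assumption, it is not part of this hunk):

    import AVFoundation
    import UIKit

    let player = AVPlayer(url: URL(fileURLWithPath: "/path/to/video.mp4"))
    let videoView = DrawingStickerEntityView.VideoView(player: player)
    videoView.videoLayer.videoGravity = .resizeAspectFill // assumption: set by the caller
    videoView.frame = CGRect(x: 0.0, y: 0.0, width: 160.0, height: 284.0)
    player.play()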
private var stickerEntity: DrawingStickerEntity {
|
||||
return self.entity as! DrawingStickerEntity
|
||||
}
|
||||
|
||||
var started: ((Double) -> Void)?
|
||||
|
||||
public var updated: () -> Void = {}
|
||||
|
||||
private var currentSize: CGSize?
|
||||
|
||||
private let imageNode: TransformImageNode
|
||||
private var animationNode: AnimatedStickerNode?
|
||||
|
||||
private var videoContainerView: UIView?
|
||||
private var videoPlayer: AVPlayer?
|
||||
private var videoLayer: AVPlayerLayer?
|
||||
public var videoView: VideoView?
|
||||
private var videoImageView: UIImageView?
|
||||
|
||||
public var mainView: MediaEditorPreviewView? {
|
||||
didSet {
|
||||
if let mainView = self.mainView {
|
||||
self.videoContainerView?.addSubview(mainView)
|
||||
} else {
|
||||
if let previous = oldValue, previous.superview === self {
|
||||
previous.removeFromSuperview()
|
||||
}
|
||||
if let videoView = self.videoView {
|
||||
self.videoContainerView?.addSubview(videoView)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
private var didSetUpAnimationNode = false
|
||||
private let stickerFetchedDisposable = MetaDisposable()
|
||||
private let cachedDisposable = MetaDisposable()
|
||||
@ -69,7 +110,7 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
}
|
||||
|
||||
private var video: String? {
|
||||
if case let .video(path, _) = self.stickerEntity.content {
|
||||
if case let .video(path, _, _) = self.stickerEntity.content {
|
||||
return path
|
||||
} else {
|
||||
return nil
|
||||
@ -82,13 +123,15 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
return file.dimensions?.cgSize ?? CGSize(width: 512.0, height: 512.0)
|
||||
case let .image(image):
|
||||
return image.size
|
||||
case let .video(_, image):
|
||||
case let .video(_, image, _):
|
||||
if let image {
|
||||
let minSide = min(image.size.width, image.size.height)
|
||||
return CGSize(width: minSide, height: minSide)
|
||||
} else {
|
||||
return CGSize(width: 512.0, height: 512.0)
|
||||
}
|
||||
case .dualVideoReference:
|
||||
return CGSize(width: 512.0, height: 512.0)
|
||||
}
|
||||
}
|
||||
|
||||
@ -112,6 +155,10 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
}
|
||||
}
|
||||
self.addSubnode(animationNode)
|
||||
|
||||
if file.isCustomTemplateEmoji {
|
||||
animationNode.dynamicColor = UIColor(rgb: 0xffffff)
|
||||
}
|
||||
}
|
||||
self.imageNode.setSignal(chatMessageAnimatedSticker(postbox: self.context.account.postbox, userLocation: .other, file: file, small: false, size: dimensions.cgSize.aspectFitted(CGSize(width: 256.0, height: 256.0))))
|
||||
self.stickerFetchedDisposable.set(freeMediaFileResourceInteractiveFetched(account: self.context.account, userLocation: .other, fileReference: stickerPackFileReference(file), resource: file.resource).start())
|
||||
@ -139,30 +186,34 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
return context
|
||||
}))
|
||||
self.setNeedsLayout()
|
||||
} else if case let .video(videoPath, image) = self.stickerEntity.content {
|
||||
} else if case let .video(videoPath, image, _) = self.stickerEntity.content {
|
||||
let url = URL(fileURLWithPath: videoPath)
|
||||
let asset = AVURLAsset(url: url)
|
||||
let playerItem = AVPlayerItem(asset: asset)
|
||||
let player = AVPlayer(playerItem: playerItem)
|
||||
player.automaticallyWaitsToMinimizeStalling = false
|
||||
let layer = AVPlayerLayer(player: player)
|
||||
layer.masksToBounds = true
|
||||
layer.videoGravity = .resizeAspectFill
|
||||
|
||||
let videoContainerView = UIView()
|
||||
videoContainerView.clipsToBounds = true
|
||||
|
||||
self.layer.addSublayer(layer)
|
||||
let videoView = VideoView(player: player)
|
||||
videoContainerView.addSubview(videoView)
|
||||
|
||||
self.addSubview(videoContainerView)
|
||||
|
||||
self.videoPlayer = player
|
||||
self.videoLayer = layer
|
||||
self.videoContainerView = videoContainerView
|
||||
self.videoView = videoView
|
||||
|
||||
let imageView = UIImageView(image: image)
|
||||
imageView.clipsToBounds = true
|
||||
imageView.contentMode = .scaleAspectFill
|
||||
self.addSubview(imageView)
|
||||
videoContainerView.addSubview(imageView)
|
||||
self.videoImageView = imageView
|
||||
}
|
||||
}
|
||||
|
||||
override func play() {
|
||||
public override func play() {
|
||||
self.isVisible = true
|
||||
self.applyVisibility()
|
||||
|
||||
@ -180,7 +231,7 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
}
|
||||
}
|
||||
|
||||
override func pause() {
|
||||
public override func pause() {
|
||||
self.isVisible = false
|
||||
self.applyVisibility()
|
||||
|
||||
@ -189,7 +240,7 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
}
|
||||
}
|
||||
|
||||
override func seek(to timestamp: Double) {
|
||||
public override func seek(to timestamp: Double) {
|
||||
self.isVisible = false
|
||||
self.isPlaying = false
|
||||
self.animationNode?.seekTo(.timestamp(timestamp))
|
||||
@ -233,7 +284,7 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
}
|
||||
|
||||
private var didApplyVisibility = false
|
||||
override func layoutSubviews() {
|
||||
public override func layoutSubviews() {
|
||||
super.layoutSubviews()
|
||||
|
||||
let size = self.bounds.size
|
||||
@ -258,20 +309,23 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
}
|
||||
}
|
||||
|
||||
if let videoLayer = self.videoLayer {
|
||||
videoLayer.cornerRadius = imageFrame.width / 2.0
|
||||
videoLayer.frame = imageFrame
|
||||
if let videoView = self.videoView {
|
||||
let videoSize = CGSize(width: imageFrame.width, height: imageFrame.width / 9.0 * 16.0)
|
||||
videoView.frame = CGRect(origin: CGPoint(x: 0.0, y: floorToScreenPixels((imageFrame.height - videoSize.height) / 2.0)), size: videoSize)
|
||||
}
|
||||
if let videoContainerView = self.videoContainerView {
|
||||
videoContainerView.layer.cornerRadius = imageFrame.width / 2.0
|
||||
videoContainerView.frame = imageFrame
|
||||
}
|
||||
if let videoImageView = self.videoImageView {
|
||||
videoImageView.layer.cornerRadius = imageFrame.width / 2.0
|
||||
videoImageView.frame = imageFrame
|
||||
videoImageView.frame = CGRect(origin: .zero, size: imageFrame.size)
|
||||
}
|
||||
|
||||
self.update(animated: false)
|
||||
}
|
||||
}
|
||||
|
||||
override func update(animated: Bool) {
|
||||
public override func update(animated: Bool) {
|
||||
self.center = self.stickerEntity.position
|
||||
|
||||
let size = self.stickerEntity.baseSize
|
||||
@ -298,20 +352,22 @@ final class DrawingStickerEntityView: DrawingEntityView {
|
||||
UIView.animate(withDuration: 0.25, animations: {
|
||||
self.imageNode.transform = animationTargetTransform
|
||||
self.animationNode?.transform = animationTargetTransform
|
||||
self.videoLayer?.transform = animationTargetTransform
|
||||
self.videoContainerView?.layer.transform = animationTargetTransform
|
||||
}, completion: { finished in
|
||||
self.imageNode.transform = staticTransform
|
||||
self.animationNode?.transform = staticTransform
|
||||
self.videoLayer?.transform = staticTransform
|
||||
self.videoContainerView?.layer.transform = staticTransform
|
||||
})
|
||||
} else {
|
||||
CATransaction.begin()
|
||||
CATransaction.setDisableActions(true)
|
||||
self.imageNode.transform = staticTransform
|
||||
self.animationNode?.transform = staticTransform
|
||||
self.videoLayer?.transform = staticTransform
|
||||
self.videoContainerView?.layer.transform = staticTransform
|
||||
CATransaction.commit()
|
||||
}
|
||||
|
||||
self.updated()
|
||||
|
||||
super.update(animated: animated)
|
||||
}
|
||||
|
@ -148,6 +148,8 @@ private class LegacyPaintStickerEntity: LegacyPaintEntity {
|
||||
self.imagePromise.set(.single(image))
|
||||
case .video:
|
||||
self.file = nil
|
||||
case .dualVideoReference:
|
||||
self.file = nil
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -174,7 +174,8 @@ private enum ApplicationSpecificGlobalNotice: Int32 {
|
||||
case chatWallpaperDarkPreviewTip = 40
|
||||
case displayChatListContacts = 41
|
||||
case displayChatListStoriesTooltip = 42
|
||||
case storiesPrivacyTooltip = 43
|
||||
case storiesCameraTooltip = 43
|
||||
case storiesDualCameraTooltip = 44
|
||||
|
||||
var key: ValueBoxKey {
|
||||
let v = ValueBoxKey(length: 4)
|
||||
@ -400,6 +401,14 @@ private struct ApplicationSpecificNoticeKeys {
|
||||
static func displayChatListStoriesTooltip() -> NoticeEntryKey {
|
||||
return NoticeEntryKey(namespace: noticeNamespace(namespace: globalNamespace), key: ApplicationSpecificGlobalNotice.displayChatListStoriesTooltip.key)
|
||||
}
|
||||
|
||||
static func storiesCameraTooltip() -> NoticeEntryKey {
|
||||
return NoticeEntryKey(namespace: noticeNamespace(namespace: globalNamespace), key: ApplicationSpecificGlobalNotice.storiesCameraTooltip.key)
|
||||
}
|
||||
|
||||
static func storiesDualCameraTooltip() -> NoticeEntryKey {
|
||||
return NoticeEntryKey(namespace: noticeNamespace(namespace: globalNamespace), key: ApplicationSpecificGlobalNotice.storiesDualCameraTooltip.key)
|
||||
}
|
||||
}
|
||||
|
||||
public struct ApplicationSpecificNotice {
|
||||
|
@ -348,7 +348,7 @@ private final class CameraScreenComponent: CombinedComponent {
|
||||
private var lastFlipTimestamp: Double?
|
||||
func togglePosition(_ action: ActionSlot<Void>) {
|
||||
let currentTimestamp = CACurrentMediaTime()
|
||||
if let lastFlipTimestamp = self.lastFlipTimestamp, currentTimestamp - lastFlipTimestamp < 1.3 {
|
||||
if let lastFlipTimestamp = self.lastFlipTimestamp, currentTimestamp - lastFlipTimestamp < 1.0 {
|
||||
return
|
||||
}
|
||||
self.lastFlipTimestamp = currentTimestamp
|
||||
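Note: the flip throttle drops from 1.3 s to 1.0 s; taps arriving inside that window are ignored. A generic sketch of the same time-based debounce, for illustration only:

    import QuartzCore

    // Drop calls that arrive within `interval` seconds of the last accepted one.
    final class TapThrottle {
        private var lastTimestamp: Double?
        private let interval: Double

        init(interval: Double = 1.0) {
            self.interval = interval
        }

        func shouldAccept() -> Bool {
            let now = CACurrentMediaTime()
            if let last = self.lastTimestamp, now - last < self.interval {
                return false
            }
            self.lastTimestamp = now
            return true
        }
    }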
@ -380,8 +380,8 @@ private final class CameraScreenComponent: CombinedComponent {
|
||||
switch value {
|
||||
case .began:
|
||||
return .single(.pendingImage)
|
||||
case let .finished(mainImage, additionalImage, _):
|
||||
return .single(.image(mainImage, additionalImage, .bottomRight))
|
||||
case let .finished(image, additionalImage, _):
|
||||
return .single(.image(CameraScreen.Result.Image(image: image, additionalImage: additionalImage, additionalImagePosition: .bottomRight)))
|
||||
case .failed:
|
||||
return .complete()
|
||||
}
|
||||
@ -409,7 +409,7 @@ private final class CameraScreenComponent: CombinedComponent {
|
||||
self.resultDisposable.set((self.camera.stopRecording()
|
||||
|> deliverOnMainQueue).start(next: { [weak self] result in
|
||||
if let self, case let .finished(mainResult, additionalResult, duration, positionChangeTimestamps, _) = result {
|
||||
self.completion.invoke(.single(.video(mainResult.0, mainResult.1, additionalResult?.0, additionalResult?.1, PixelDimensions(width: 1080, height: 1920), duration, positionChangeTimestamps, .bottomRight)))
|
||||
self.completion.invoke(.single(.video(CameraScreen.Result.Video(videoPath: mainResult.0, coverImage: mainResult.1, mirror: mainResult.2, additionalVideoPath: additionalResult?.0, additionalCoverImage: additionalResult?.1, dimensions: PixelDimensions(width: 1080, height: 1920), duration: duration, positionChangeTimestamps: positionChangeTimestamps, additionalVideoPosition: .bottomRight))))
|
||||
}
|
||||
}))
|
||||
self.isTransitioning = true
|
||||
@ -553,7 +553,7 @@ private final class CameraScreenComponent: CombinedComponent {
|
||||
transition: .immediate
|
||||
)
|
||||
context.add(flashButton
|
||||
.position(CGPoint(x: isTablet ? availableSize.width - smallPanelWidth / 2.0 : availableSize.width - topControlInset - flashButton.size.width / 2.0, y: environment.safeInsets.top + topControlInset + flashButton.size.height / 2.0))
|
||||
.position(CGPoint(x: isTablet ? availableSize.width - smallPanelWidth / 2.0 : availableSize.width - topControlInset - flashButton.size.width / 2.0 - 5.0, y: environment.safeInsets.top + topControlInset + flashButton.size.height / 2.0))
|
||||
.appear(.default(scale: true))
|
||||
.disappear(.default(scale: true))
|
||||
)
|
||||
@ -578,7 +578,7 @@ private final class CameraScreenComponent: CombinedComponent {
|
||||
transition: .immediate
|
||||
)
|
||||
context.add(dualButton
|
||||
.position(CGPoint(x: availableSize.width / 2.0, y: environment.safeInsets.top + topControlInset + dualButton.size.height / 2.0))
|
||||
.position(CGPoint(x: availableSize.width - topControlInset - flashButton.size.width / 2.0 - 52.0, y: environment.safeInsets.top + topControlInset + dualButton.size.height / 2.0 + 1.0))
|
||||
.appear(.default(scale: true))
|
||||
.disappear(.default(scale: true))
|
||||
)
|
||||
@ -734,7 +734,7 @@ private final class CameraScreenComponent: CombinedComponent {
|
||||
}
|
||||
|
||||
var isVideoRecording = false
|
||||
if case .video = state.cameraState.mode, isTablet {
|
||||
if case .video = state.cameraState.mode {
|
||||
isVideoRecording = true
|
||||
} else if state.cameraState.recording != .none {
|
||||
isVideoRecording = true
|
||||
@ -906,18 +906,36 @@ public class CameraScreen: ViewController {
|
||||
}
|
||||
|
||||
public enum Result {
|
||||
public struct Image {
|
||||
public let image: UIImage
|
||||
public let additionalImage: UIImage?
|
||||
public let additionalImagePosition: CameraScreen.PIPPosition
|
||||
}
|
||||
|
||||
public struct Video {
|
||||
public let videoPath: String
|
||||
public let coverImage: UIImage?
|
||||
public let mirror: Bool
|
||||
public let additionalVideoPath: String?
|
||||
public let additionalCoverImage: UIImage?
|
||||
public let dimensions: PixelDimensions
|
||||
public let duration: Double
|
||||
public let positionChangeTimestamps: [(Bool, Double)]
|
||||
public let additionalVideoPosition: CameraScreen.PIPPosition
|
||||
}
|
||||
|
||||
case pendingImage
|
||||
case image(UIImage, UIImage?, CameraScreen.PIPPosition)
|
||||
case video(String, UIImage?, String?, UIImage?, PixelDimensions, Double, [(Bool, Double)], CameraScreen.PIPPosition)
|
||||
case image(Image)
|
||||
case video(Video)
|
||||
case asset(PHAsset)
|
||||
case draft(MediaEditorDraft)
|
||||
|
||||
func withPIPPosition(_ position: CameraScreen.PIPPosition) -> Result {
|
||||
switch self {
|
||||
case let .image(mainImage, additionalImage, _):
|
||||
return .image(mainImage, additionalImage, position)
|
||||
case let .video(mainPath, mainImage, additionalPath, additionalImage, dimensions, duration, positionChangeTimestamps, _):
|
||||
return .video(mainPath, mainImage, additionalPath, additionalImage, dimensions, duration, positionChangeTimestamps, position)
|
||||
case let .image(result):
|
||||
return .image(Image(image: result.image, additionalImage: result.additionalImage, additionalImagePosition: position))
|
||||
case let .video(result):
|
||||
return .video(Video(videoPath: result.videoPath, coverImage: result.coverImage, mirror: result.mirror, additionalVideoPath: result.additionalVideoPath, additionalCoverImage: result.additionalCoverImage, dimensions: result.dimensions, duration: result.duration, positionChangeTimestamps: result.positionChangeTimestamps, additionalVideoPosition: position))
|
||||
default:
|
||||
return self
|
||||
}
|
||||
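Note: Result.image and Result.video now carry named structs instead of long positional tuples, which is what the updated call sites earlier in this diff construct. A hedged sketch of building the new payload from inside the module, assuming the implicit memberwise initializer (all values are placeholders):

    let video = CameraScreen.Result.Video(
        videoPath: "/tmp/output.mp4",
        coverImage: nil,
        mirror: true,                            // recorded on the front camera
        additionalVideoPath: nil,                // no dual-camera track in this example
        additionalCoverImage: nil,
        dimensions: PixelDimensions(width: 1080, height: 1920),
        duration: 12.5,
        positionChangeTimestamps: [(true, 0.0)], // started on the front camera
        additionalVideoPosition: .bottomRight
    )
    let result: CameraScreen.Result = .video(video)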
@ -1202,10 +1220,7 @@ public class CameraScreen: ViewController {
|
||||
}
|
||||
|
||||
if self.isDualCamEnabled && previousPosition != newPosition {
|
||||
CATransaction.begin()
|
||||
CATransaction.setDisableActions(true)
|
||||
self.requestUpdateLayout(hasAppeared: false, transition: .immediate)
|
||||
CATransaction.commit()
|
||||
self.animateDualCameraPositionSwitch()
|
||||
} else if dualCamWasEnabled != self.isDualCamEnabled {
|
||||
self.requestUpdateLayout(hasAppeared: false, transition: .spring(duration: 0.4))
|
||||
}
|
||||
@ -1313,6 +1328,58 @@ public class CameraScreen: ViewController {
|
||||
}
|
||||
}
|
||||
|
||||
func animateDualCameraPositionSwitch() {
|
||||
let duration: Double = 0.5
|
||||
let timingFunction = kCAMediaTimingFunctionSpring
|
||||
|
||||
if let additionalSnapshot = self.additionalPreviewContainerView.snapshotView(afterScreenUpdates: false) {
|
||||
additionalSnapshot.frame = self.additionalPreviewContainerView.frame
|
||||
self.additionalPreviewContainerView.superview?.addSubview(additionalSnapshot)
|
||||
|
||||
additionalSnapshot.layer.animateScale(from: 1.0, to: 0.01, duration: 0.35, timingFunction: timingFunction, removeOnCompletion: false)
|
||||
additionalSnapshot.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.15, removeOnCompletion: false, completion: { [weak additionalSnapshot] _ in
|
||||
additionalSnapshot?.removeFromSuperview()
|
||||
})
|
||||
}
|
||||
|
||||
CATransaction.begin()
|
||||
CATransaction.setDisableActions(true)
|
||||
self.requestUpdateLayout(hasAppeared: false, transition: .immediate)
|
||||
CATransaction.commit()
|
||||
|
||||
self.additionalPreviewContainerView.layer.animate(
|
||||
from: 12.0 as NSNumber,
|
||||
to: self.additionalPreviewContainerView.layer.cornerRadius as NSNumber,
|
||||
keyPath: "cornerRadius",
|
||||
timingFunction: timingFunction,
|
||||
duration: duration
|
||||
)
|
||||
|
||||
self.additionalPreviewContainerView.layer.animatePosition(
|
||||
from: self.mainPreviewContainerView.center,
|
||||
to: self.additionalPreviewContainerView.center,
|
||||
duration: duration,
|
||||
timingFunction: timingFunction
|
||||
)
|
||||
|
||||
let scale = self.mainPreviewContainerView.frame.width / self.additionalPreviewContainerView.frame.width
|
||||
self.additionalPreviewContainerView.layer.animateScale(
|
||||
from: scale,
|
||||
to: 1.0,
|
||||
duration: duration,
|
||||
timingFunction: timingFunction
|
||||
)
|
||||
|
||||
let aspectRatio = self.mainPreviewContainerView.frame.height / self.mainPreviewContainerView.frame.width
|
||||
let height = self.additionalPreviewContainerView.bounds.width * aspectRatio
|
||||
self.additionalPreviewContainerView.layer.animateBounds(
|
||||
from: CGRect(origin: CGPoint(x: 0.0, y: floorToScreenPixels((self.additionalPreviewContainerView.bounds.height - height) / 2.0)), size: CGSize(width: self.additionalPreviewContainerView.bounds.width, height: height)),
|
||||
to: self.additionalPreviewContainerView.bounds,
|
||||
duration: duration,
|
||||
timingFunction: timingFunction
|
||||
)
|
||||
}
|
||||
|
||||
func animateIn() {
|
||||
self.transitionDimView.alpha = 0.0
|
||||
self.backgroundView.alpha = 0.0
|
||||
@ -1545,8 +1612,8 @@ public class CameraScreen: ViewController {
|
||||
override func hitTest(_ point: CGPoint, with event: UIEvent?) -> UIView? {
|
||||
let result = super.hitTest(point, with: event)
|
||||
if result == self.componentHost.view {
|
||||
if self.additionalPreviewView.bounds.contains(self.view.convert(point, to: self.additionalPreviewView)) {
|
||||
return self.additionalPreviewView
|
||||
if self.additionalPreviewContainerView.bounds.contains(self.view.convert(point, to: self.additionalPreviewContainerView)) {
|
||||
return self.additionalPreviewContainerView
|
||||
} else {
|
||||
return self.mainPreviewView
|
||||
}
|
||||
@ -1557,13 +1624,6 @@ public class CameraScreen: ViewController {
|
||||
func requestUpdateLayout(hasAppeared: Bool, transition: Transition) {
|
||||
if let layout = self.validLayout {
|
||||
self.containerLayoutUpdated(layout: layout, forceUpdate: true, hasAppeared: hasAppeared, transition: transition)
|
||||
|
||||
if let view = self.componentHost.findTaggedView(tag: flashButtonTag) {
|
||||
view.layer.shadowOffset = CGSize(width: 0.0, height: 0.0)
|
||||
view.layer.shadowRadius = 3.0
|
||||
view.layer.shadowColor = UIColor.black.cgColor
|
||||
view.layer.shadowOpacity = 0.35
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@ -1676,6 +1736,13 @@ public class CameraScreen: ViewController {
|
||||
transition.setFrame(view: componentView, frame: componentFrame)
|
||||
}
|
||||
|
||||
if let view = self.componentHost.findTaggedView(tag: flashButtonTag), view.layer.shadowOpacity.isZero {
|
||||
view.layer.shadowOffset = CGSize(width: 0.0, height: 0.0)
|
||||
view.layer.shadowRadius = 3.0
|
||||
view.layer.shadowColor = UIColor.black.cgColor
|
||||
view.layer.shadowOpacity = 0.25
|
||||
}
|
||||
|
||||
transition.setPosition(view: self.backgroundView, position: CGPoint(x: layout.size.width / 2.0, y: layout.size.height / 2.0))
|
||||
transition.setBounds(view: self.backgroundView, bounds: CGRect(origin: .zero, size: layout.size))
|
||||
|
||||
@ -1723,7 +1790,11 @@ public class CameraScreen: ViewController {
|
||||
origin = origin.offsetBy(dx: pipTranslation.x, dy: pipTranslation.y)
|
||||
}
|
||||
|
||||
let additionalPreviewInnerSize = previewFrame.size.aspectFilled(CGSize(width: circleSide, height: circleSide))
|
||||
let additionalPreviewInnerFrame = CGRect(origin: CGPoint(x: 0.0, y: floorToScreenPixels((circleSide - additionalPreviewInnerSize.height) / 2.0)), size: additionalPreviewInnerSize)
|
||||
|
||||
let additionalPreviewFrame = CGRect(origin: CGPoint(x: origin.x - circleSide / 2.0, y: origin.y - circleSide / 2.0), size: CGSize(width: circleSide, height: circleSide))
|
||||
|
||||
transition.setPosition(view: self.additionalPreviewContainerView, position: additionalPreviewFrame.center)
|
||||
transition.setBounds(view: self.additionalPreviewContainerView, bounds: CGRect(origin: .zero, size: additionalPreviewFrame.size))
|
||||
self.additionalPreviewContainerView.layer.cornerRadius = additionalPreviewFrame.width / 2.0
|
||||
@ -1757,7 +1828,7 @@ public class CameraScreen: ViewController {
|
||||
}
|
||||
|
||||
mainPreviewView.frame = CGRect(origin: .zero, size: previewFrame.size)
|
||||
additionalPreviewView.frame = CGRect(origin: .zero, size: additionalPreviewFrame.size)
|
||||
additionalPreviewView.frame = additionalPreviewInnerFrame
|
||||
|
||||
self.previewFrameLeftDimView.isHidden = !isTablet
|
||||
transition.setFrame(view: self.previewFrameLeftDimView, frame: CGRect(origin: .zero, size: CGSize(width: viewfinderFrame.minX, height: viewfinderFrame.height)))
|
||||
@ -2018,6 +2089,7 @@ public class CameraScreen: ViewController {
|
||||
if let layout = self.validLayout, case .regular = layout.metrics.widthClass {
|
||||
return
|
||||
}
|
||||
let transitionFraction = max(0.0, min(1.0, transitionFraction))
|
||||
let offsetX = floorToScreenPixels((1.0 - transitionFraction) * self.node.frame.width * -1.0)
|
||||
transition.updateTransform(layer: self.node.backgroundView.layer, transform: CGAffineTransform(translationX: offsetX, y: 0.0))
|
||||
transition.updateTransform(layer: self.node.containerView.layer, transform: CGAffineTransform(translationX: offsetX, y: 0.0))
|
||||
@ -2114,22 +2186,22 @@ private final class DualIconComponent: Component {
|
||||
override init(frame: CGRect) {
|
||||
super.init(frame: frame)
|
||||
|
||||
let image = generateImage(CGSize(width: 36.0, height: 36.0), rotatedContext: { size, context in
|
||||
let image = generateImage(CGSize(width: 36.0, height: 36.0), contextGenerator: { size, context in
|
||||
context.clear(CGRect(origin: .zero, size: size))
|
||||
|
||||
if let image = UIImage(bundleImageName: "Camera/DualIcon"), let cgImage = image.cgImage {
|
||||
context.draw(cgImage, in: CGRect(origin: CGPoint(x: floorToScreenPixels((size.width - image.size.width) / 2.0), y: floorToScreenPixels((size.height - image.size.height) / 2.0)), size: image.size))
|
||||
context.draw(cgImage, in: CGRect(origin: CGPoint(x: floorToScreenPixels((size.width - image.size.width) / 2.0), y: floorToScreenPixels((size.height - image.size.height) / 2.0) - 1.0), size: image.size))
|
||||
}
|
||||
})
|
||||
|
||||
let selectedImage = generateImage(CGSize(width: 36.0, height: 36.0), rotatedContext: { size, context in
|
||||
let selectedImage = generateImage(CGSize(width: 36.0, height: 36.0), contextGenerator: { size, context in
|
||||
context.clear(CGRect(origin: .zero, size: size))
|
||||
context.setFillColor(UIColor.white.cgColor)
|
||||
context.fillEllipse(in: CGRect(origin: .zero, size: size))
|
||||
|
||||
if let image = UIImage(bundleImageName: "Camera/DualIcon"), let cgImage = image.cgImage {
|
||||
context.setBlendMode(.clear)
|
||||
context.clip(to: CGRect(origin: CGPoint(x: floorToScreenPixels((size.width - image.size.width) / 2.0), y: floorToScreenPixels((size.height - image.size.height) / 2.0)), size: image.size), mask: cgImage)
|
||||
context.clip(to: CGRect(origin: CGPoint(x: floorToScreenPixels((size.width - image.size.width) / 2.0), y: floorToScreenPixels((size.height - image.size.height) / 2.0) - 1.0), size: image.size), mask: cgImage)
|
||||
context.fill(CGRect(origin: .zero, size: size))
|
||||
}
|
||||
})
|
||||
@ -2138,9 +2210,9 @@ private final class DualIconComponent: Component {
|
||||
self.iconView.highlightedImage = selectedImage
|
||||
|
||||
self.iconView.layer.shadowOffset = CGSize(width: 0.0, height: 0.0)
|
||||
self.iconView.layer.shadowRadius = 4.0
|
||||
self.iconView.layer.shadowRadius = 3.0
|
||||
self.iconView.layer.shadowColor = UIColor.black.cgColor
|
||||
self.iconView.layer.shadowOpacity = 0.2
|
||||
self.iconView.layer.shadowOpacity = 0.25
|
||||
|
||||
self.addSubview(self.iconView)
|
||||
}
|
||||
|
@ -20,6 +20,8 @@ typedef struct {
|
||||
float warmth;
|
||||
float grain;
|
||||
float vignette;
|
||||
float hasCurves;
|
||||
float2 empty;
|
||||
} MediaEditorAdjustments;
|
||||
|
||||
half3 fade(half3 color, float fadeAmount) {
|
||||
@ -97,7 +99,9 @@ fragment half4 adjustmentsFragmentShader(RasterizerData in [[stage_in]],
|
||||
half4 source = sourceImage.sample(samplr, float2(in.texCoord.x, in.texCoord.y));
|
||||
half4 result = source;
|
||||
|
||||
//result = half4(applyRGBCurve(hslToRgb(applyLuminanceCurve(rgbToHsl(result.rgb), allCurve)), redCurve, greenCurve, blueCurve), result.a);
|
||||
if (adjustments.hasCurves > epsilon) {
|
||||
result = half4(applyRGBCurve(hslToRgb(applyLuminanceCurve(rgbToHsl(result.rgb), allCurve)), redCurve, greenCurve, blueCurve), result.a);
|
||||
}
|
||||
|
||||
if (abs(adjustments.highlights) > epsilon || abs(adjustments.shadows) > epsilon) {
|
||||
const float3 hsLuminanceWeighting = float3(0.3, 0.3, 0.3);
|
||||
@ -181,5 +185,20 @@ fragment half4 adjustmentsFragmentShader(RasterizerData in [[stage_in]],
|
||||
result.rgb = half3(mix(pow(float3(result.rgb), float3(1.0 / (1.0 - mag))), float3(0.0), mag * mag));
|
||||
}
|
||||
|
||||
if (abs(adjustments.grain) > epsilon) {
|
||||
const float grainSize = 2.3;
|
||||
float3 rotOffset = float3(1.425, 3.892, 5.835);
|
||||
float2 rotCoordsR = coordRot(in.texCoord, rotOffset.x);
|
||||
half3 noise = half3(pnoise3D(float3(rotCoordsR * float2(adjustments.dimensions.x / grainSize, adjustments.dimensions.y / grainSize), 0.0)));
|
||||
|
||||
half3 lumcoeff = half3(0.299, 0.587, 0.114);
|
||||
float luminance = dot(result.rgb, lumcoeff);
|
||||
float lum = smoothstep(0.2, 0.0, luminance);
|
||||
lum += luminance;
|
||||
|
||||
noise = mix(noise, half3(0.0), pow(lum, 4.0));
|
||||
result.rgb = result.rgb + noise * adjustments.grain * 0.04;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
@ -25,7 +25,7 @@ fragment half4 blurRadialFragmentShader(RasterizerData in [[stage_in]],
half4 sourceColor = sourceTexture.sample(sourceSampler, in.texCoord);
half4 blurredColor = blurTexture.sample(blurSampler, in.texCoord);

float2 texCoord = float2(in.texCoord.x, (in.texCoord.y * values.aspectRatio));
float2 texCoord = float2(in.texCoord.x, (in.texCoord.y * values.aspectRatio + 0.5 - 0.5 * values.aspectRatio));
half distanceFromCenter = distance(values.position, texCoord);

half3 result = mix(blurredColor.rgb, sourceColor.rgb, smoothstep(1.0, values.falloff, clamp(distanceFromCenter / values.size, 0.0, 1.0)));
@ -45,7 +45,7 @@ fragment half4 blurLinearFragmentShader(RasterizerData in [[stage_in]],
half4 sourceColor = sourceTexture.sample(sourceSampler, in.texCoord);
half4 blurredColor = blurTexture.sample(blurSampler, in.texCoord);

float2 texCoord = float2(in.texCoord.x, (in.texCoord.y * values.aspectRatio));
float2 texCoord = float2(in.texCoord.x, (in.texCoord.y * values.aspectRatio + 0.5 - 0.5 * values.aspectRatio));
half distanceFromCenter = abs((texCoord.x - values.position.x) * sin(-values.rotation) + (texCoord.y - values.position.y) * cos(-values.rotation));

half3 result = mix(blurredColor.rgb, sourceColor.rgb, smoothstep(1.0, values.falloff, clamp(distanceFromCenter / values.size, 0.0, 1.0)));
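Note: both blur shaders previously scaled the y texture coordinate by the aspect ratio about y = 0; the added + 0.5 - 0.5 * aspectRatio term performs the same scaling about the vertical center of the frame instead. A small Swift restatement of the corrected mapping, for illustration:

    // y * a + 0.5 - 0.5 * a  ==  0.5 + (y - 0.5) * a
    // i.e. the coordinate is scaled about the middle of the frame rather than about y = 0.
    func aspectCorrectedY(_ y: Double, aspectRatio a: Double) -> Double {
        return 0.5 + (y - 0.5) * a
    }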
@ -5,4 +5,5 @@
|
||||
typedef struct {
|
||||
float4 pos [[position]];
|
||||
float2 texCoord;
|
||||
float2 localPos;
|
||||
} RasterizerData;
|
||||
|
@ -6,6 +6,7 @@ using namespace metal;
|
||||
typedef struct {
|
||||
float4 pos;
|
||||
float2 texCoord;
|
||||
float2 localPos;
|
||||
} VertexData;
|
||||
|
||||
vertex RasterizerData defaultVertexShader(uint vertexID [[vertex_id]],
|
||||
@ -14,6 +15,7 @@ vertex RasterizerData defaultVertexShader(uint vertexID [[vertex_id]],
|
||||
|
||||
out.pos = vector_float4(0.0, 0.0, 0.0, 1.0);
|
||||
out.pos.xy = vertices[vertexID].pos.xy;
|
||||
out.localPos = vertices[vertexID].localPos.xy;
|
||||
|
||||
out.texCoord = vertices[vertexID].texCoord;
|
||||
|
||||
|
@ -0,0 +1,42 @@
#include <metal_stdlib>
#include "EditorCommon.h"

using namespace metal;

typedef struct {
float4 pos;
float2 texCoord;
float4 localPos;
} VertexData;

float sdfRoundedRectangle(float2 uv, float2 position, float2 size, float radius) {
float2 q = abs(uv - position) - size + radius;
return length(max(q, 0.0)) + min(max(q.x, q.y), 0.0) - radius;
}

fragment half4 dualFragmentShader(RasterizerData in [[stage_in]],
texture2d<half, access::sample> texture [[texture(0)]],
constant uint2 &resolution[[buffer(0)]],
constant float &roundness[[buffer(1)]],
constant float &alpha[[buffer(2)]]
) {
float2 R = float2(resolution.x, resolution.y);

float2 uv = (in.localPos - float2(0.5, 0.5)) * 2.0;
if (R.x > R.y) {
uv.y = uv.y * R.y / R.x;
} else {
uv.x = uv.x * R.x / R.y;
}
float aspectRatio = R.x / R.y;

constexpr sampler samplr(filter::linear, mag_filter::linear, min_filter::linear);
half3 color = texture.sample(samplr, in.texCoord).rgb;

float t = 1.0 / resolution.y;
float side = 1.0 * aspectRatio;
float distance = smoothstep(t, -t, sdfRoundedRectangle(uv, float2(0.0, 0.0), float2(side, mix(1.0, side, roundness)), side * roundness));

return mix(half4(color, 0.0), half4(color, 1.0 * alpha), distance);
}
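Note: sdfRoundedRectangle is a standard signed-distance function: negative inside the rounded rectangle, positive outside, and smoothstep(t, -t, ...) with t = one texel turns it into an antialiased mask for the dual-camera inset. A hedged Swift port of the same function, handy for checking values on the CPU (not part of the commit):

    import simd

    // `uv` is measured from `position`, `size` is the half-extent, `radius` the corner radius.
    // Result < 0 inside the rounded rectangle, > 0 outside, 0 on the edge.
    func sdfRoundedRectangle(uv: SIMD2<Float>, position: SIMD2<Float>, size: SIMD2<Float>, radius: Float) -> Float {
        let q = simd_abs(uv - position) - size + SIMD2<Float>(repeating: radius)
        return simd_length(simd_max(q, SIMD2<Float>(repeating: 0.0))) + min(max(q.x, q.y), 0.0) - radius
    }

    // Example: the center of a unit-half-extent square is well inside the shape.
    // sdfRoundedRectangle(uv: .zero, position: .zero, size: SIMD2<Float>(1.0, 1.0), radius: 0.2) == -1.0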
@ -21,3 +21,6 @@ half3 yuvToRgb(half3 inP);
|
||||
half easeInOutSigmoid(half value, half strength);
|
||||
|
||||
half powerCurve(half inVal, half mag);
|
||||
|
||||
float pnoise3D(float3 p);
|
||||
float2 coordRot(float2 tc, float angle);
|
||||
|
@ -132,3 +132,73 @@ half powerCurve(half inVal, half mag) {
|
||||
outVal = pow((1.0 - inVal), power);
|
||||
return outVal;
|
||||
}
|
||||
|
||||
float4 rnm(float2 tc) {
|
||||
float noise = sin(dot(tc, float2(12.9898, 78.233))) * 43758.5453;
|
||||
|
||||
float noiseR = fract(noise) * 2.0-1.0;
|
||||
float noiseG = fract(noise * 1.2154) * 2.0-1.0;
|
||||
float noiseB = fract(noise * 1.3453) * 2.0-1.0;
|
||||
float noiseA = fract(noise * 1.3647) * 2.0-1.0;
|
||||
|
||||
return float4(noiseR,noiseG,noiseB,noiseA);
|
||||
}
|
||||
|
||||
float fade(float t) {
|
||||
return t*t*t*(t*(t*6.0-15.0)+10.0);
|
||||
}
|
||||
|
||||
float pnoise3D(float3 p) {
|
||||
const half permTexUnit = 1.0 / 256.0;
|
||||
const half permTexUnitHalf = 0.5 / 256.0;
|
||||
|
||||
float3 pi = permTexUnit * floor(p) + permTexUnitHalf;
|
||||
float3 pf = fract(p);
|
||||
|
||||
// Noise contributions from (x=0, y=0), z=0 and z=1
|
||||
float perm00 = rnm(pi.xy).a ;
|
||||
float3 grad000 = rnm(float2(perm00, pi.z)).rgb * 4.0 - 1.0;
|
||||
float n000 = dot(grad000, pf);
|
||||
float3 grad001 = rnm(float2(perm00, pi.z + permTexUnit)).rgb * 4.0 - 1.0;
|
||||
float n001 = dot(grad001, pf - float3(0.0, 0.0, 1.0));
|
||||
|
||||
// Noise contributions from (x=0, y=1), z=0 and z=1
|
||||
float perm01 = rnm(pi.xy + float2(0.0, permTexUnit)).a ;
|
||||
float3 grad010 = rnm(float2(perm01, pi.z)).rgb * 4.0 - 1.0;
|
||||
float n010 = dot(grad010, pf - float3(0.0, 1.0, 0.0));
|
||||
float3 grad011 = rnm(float2(perm01, pi.z + permTexUnit)).rgb * 4.0 - 1.0;
|
||||
float n011 = dot(grad011, pf - float3(0.0, 1.0, 1.0));
|
||||
|
||||
// Noise contributions from (x=1, y=0), z=0 and z=1
|
||||
float perm10 = rnm(pi.xy + float2(permTexUnit, 0.0)).a ;
|
||||
float3 grad100 = rnm(float2(perm10, pi.z)).rgb * 4.0 - 1.0;
|
||||
float n100 = dot(grad100, pf - float3(1.0, 0.0, 0.0));
|
||||
float3 grad101 = rnm(float2(perm10, pi.z + permTexUnit)).rgb * 4.0 - 1.0;
|
||||
float n101 = dot(grad101, pf - float3(1.0, 0.0, 1.0));
|
||||
|
||||
// Noise contributions from (x=1, y=1), z=0 and z=1
|
||||
float perm11 = rnm(pi.xy + float2(permTexUnit, permTexUnit)).a ;
|
||||
float3 grad110 = rnm(float2(perm11, pi.z)).rgb * 4.0 - 1.0;
|
||||
float n110 = dot(grad110, pf - float3(1.0, 1.0, 0.0));
|
||||
float3 grad111 = rnm(float2(perm11, pi.z + permTexUnit)).rgb * 4.0 - 1.0;
|
||||
float n111 = dot(grad111, pf - float3(1.0, 1.0, 1.0));
|
||||
|
||||
// Blend contributions along x
|
||||
float4 n_x = mix(float4(n000, n001, n010, n011), float4(n100, n101, n110, n111), fade(pf.x));
|
||||
|
||||
// Blend contributions along y
|
||||
float2 n_xy = mix(n_x.xy, n_x.zw, fade(pf.y));
|
||||
|
||||
// Blend contributions along z
|
||||
float n_xyz = mix(n_xy.x, n_xy.y, fade(pf.z));
|
||||
|
||||
return n_xyz;
|
||||
}
|
||||
|
||||
float2 coordRot(float2 tc, float angle) {
|
||||
float rotX = ((tc.x * 2.0 - 1.0) * cos(angle)) - ((tc.y * 2.0 - 1.0) * sin(angle));
|
||||
float rotY = ((tc.y * 2.0 - 1.0) * cos(angle)) + ((tc.x * 2.0 - 1.0) * sin(angle));
|
||||
rotX = rotX * 0.5 + 0.5;
|
||||
rotY = rotY * 0.5 + 0.5;
|
||||
return float2(rotX, rotY);
|
||||
}
|
||||
|
@ -18,6 +18,8 @@ struct MediaEditorAdjustments {
var warmth: simd_float1
var grain: simd_float1
var vignette: simd_float1
var hasCurves: simd_float1
var empty: simd_float2

var hasValues: Bool {
let epsilon: simd_float1 = 0.005
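Note: this Swift struct is handed to the adjustments fragment shader as constant data, so its field order and size have to stay in lockstep with the Metal MediaEditorAdjustments struct changed earlier in this commit; hasCurves mirrors the new shader field, and the empty simd_float2 reads like explicit padding to keep the two layouts aligned (an inference, not something stated in the commit). A hedged sketch of how such a struct is typically bound (encoder usage and buffer index are assumptions):

    import Metal

    func bind(_ adjustments: MediaEditorAdjustments, to encoder: MTLRenderCommandEncoder, index: Int) {
        var constants = adjustments
        // Copies the raw bytes of the struct into the argument table; any layout drift
        // between the Swift and Metal definitions would silently corrupt the shader's values.
        encoder.setFragmentBytes(&constants, length: MemoryLayout<MediaEditorAdjustments>.stride, index: index)
    }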
@ -55,6 +57,9 @@ struct MediaEditorAdjustments {
|
||||
if abs(self.vignette) > epsilon {
|
||||
return true
|
||||
}
|
||||
if abs(self.hasCurves) > epsilon {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
}
|
||||
@ -77,7 +82,9 @@ final class AdjustmentsRenderPass: DefaultRenderPass {
|
||||
exposure: 0.0,
|
||||
warmth: 0.0,
|
||||
grain: 0.0,
|
||||
vignette: 0.0
|
||||
vignette: 0.0,
|
||||
hasCurves: 0.0,
|
||||
empty: simd_float2(0.0, 0.0)
|
||||
)
|
||||
|
||||
var allCurve: [Float] = Array(repeating: 0, count: 200)
|
||||
|
@ -16,7 +16,8 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
public enum Content: Equatable {
|
||||
case file(TelegramMediaFile)
|
||||
case image(UIImage)
|
||||
case video(String, UIImage?)
|
||||
case video(String, UIImage?, Bool)
|
||||
case dualVideoReference
|
||||
|
||||
public static func == (lhs: Content, rhs: Content) -> Bool {
|
||||
switch lhs {
|
||||
@ -32,9 +33,15 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
case let .video(lhsPath, _):
|
||||
if case let .video(rhsPath, _) = rhs {
|
||||
return lhsPath == rhsPath
|
||||
case let .video(lhsPath, _, lhsInternalMirrored):
|
||||
if case let .video(rhsPath, _, rhsInternalMirrored) = rhs {
|
||||
return lhsPath == rhsPath && lhsInternalMirrored == rhsInternalMirrored
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
case .dualVideoReference:
|
||||
if case .dualVideoReference = rhs {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
@ -47,6 +54,8 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
case imagePath
|
||||
case videoPath
|
||||
case videoImagePath
|
||||
case videoMirrored
|
||||
case dualVideo
|
||||
case referenceDrawingSize
|
||||
case position
|
||||
case scale
|
||||
@ -83,6 +92,8 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
return false
|
||||
case .video:
|
||||
return true
|
||||
case .dualVideoReference:
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
@ -107,7 +118,9 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
public init(from decoder: Decoder) throws {
|
||||
let container = try decoder.container(keyedBy: CodingKeys.self)
|
||||
self.uuid = try container.decode(UUID.self, forKey: .uuid)
|
||||
if let file = try container.decodeIfPresent(TelegramMediaFile.self, forKey: .file) {
|
||||
if let _ = try container.decodeIfPresent(Bool.self, forKey: .dualVideo) {
|
||||
self.content = .dualVideoReference
|
||||
} else if let file = try container.decodeIfPresent(TelegramMediaFile.self, forKey: .file) {
|
||||
self.content = .file(file)
|
||||
} else if let imagePath = try container.decodeIfPresent(String.self, forKey: .imagePath), let image = UIImage(contentsOfFile: fullEntityMediaPath(imagePath)) {
|
||||
self.content = .image(image)
|
||||
@ -116,7 +129,8 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
if let imagePath = try container.decodeIfPresent(String.self, forKey: .videoImagePath), let image = UIImage(contentsOfFile: fullEntityMediaPath(imagePath)) {
|
||||
imageValue = image
|
||||
}
|
||||
self.content = .video(videoPath, imageValue)
|
||||
let videoMirrored = try container.decodeIfPresent(Bool.self, forKey: .videoMirrored) ?? false
|
||||
self.content = .video(videoPath, imageValue, videoMirrored)
|
||||
} else {
|
||||
fatalError()
|
||||
}
|
||||
@ -141,7 +155,7 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
try? imageData.write(to: URL(fileURLWithPath: fullImagePath))
|
||||
try container.encodeIfPresent(imagePath, forKey: .imagePath)
|
||||
}
|
||||
case let .video(path, image):
|
||||
case let .video(path, image, videoMirrored):
|
||||
try container.encode(path, forKey: .videoPath)
|
||||
let imagePath = "\(self.uuid).jpg"
|
||||
let fullImagePath = fullEntityMediaPath(imagePath)
|
||||
@ -150,6 +164,9 @@ public final class DrawingStickerEntity: DrawingEntity, Codable {
|
||||
try? imageData.write(to: URL(fileURLWithPath: fullImagePath))
|
||||
try container.encodeIfPresent(imagePath, forKey: .videoImagePath)
|
||||
}
|
||||
try container.encode(videoMirrored, forKey: .videoMirrored)
|
||||
case .dualVideoReference:
|
||||
try container.encode(true, forKey: .dualVideo)
|
||||
}
|
||||
try container.encode(self.referenceDrawingSize, forKey: .referenceDrawingSize)
|
||||
try container.encode(self.position, forKey: .position)
|
||||
|
@ -25,13 +25,13 @@ public struct MediaEditorPlayerState {
public final class MediaEditor {
public enum Subject {
case image(UIImage, PixelDimensions)
case video(String, UIImage?, PixelDimensions, Double)
case video(String, UIImage?, Bool, String?, PixelDimensions, Double)
case asset(PHAsset)
case draft(MediaEditorDraft)

var dimensions: PixelDimensions {
switch self {
case let .image(_, dimensions), let .video(_, _, dimensions, _):
case let .image(_, dimensions), let .video(_, _, _, _, dimensions, _):
return dimensions
case let .asset(asset):
return PixelDimensions(width: Int32(asset.pixelWidth), height: Int32(asset.pixelHeight))
@ -43,6 +43,7 @@ public final class MediaEditor {

private let subject: Subject
private var player: AVPlayer?
private var additionalPlayer: AVPlayer?
private var timeObserver: Any?
private var didPlayToEndTimeObserver: NSObjectProtocol?

@ -100,6 +101,10 @@ public final class MediaEditor {
return self.renderChain.blurPass.maskTexture != nil
}

public var sourceIsVideo: Bool {
self.player != nil
}

public var resultIsVideo: Bool {
return self.player != nil || self.values.entities.contains(where: { $0.entity.isAnimated })
}
@ -260,6 +265,11 @@ public final class MediaEditor {
videoTrimRange: nil,
videoIsMuted: false,
videoIsFullHd: false,
additionalVideoPath: nil,
additionalVideoPosition: nil,
additionalVideoScale: nil,
additionalVideoRotation: nil,
additionalVideoPositionChanges: [],
drawing: nil,
entities: [],
toolValues: [:]
@ -281,7 +291,7 @@ public final class MediaEditor {
if case let .asset(asset) = subject {
self.playerPlaybackState = (asset.duration, 0.0, false, false)
self.playerPlaybackStatePromise.set(.single(self.playerPlaybackState))
} else if case let .video(_, _, _, duration) = subject {
} else if case let .video(_, _, _, _, _, duration) = subject {
self.playerPlaybackState = (duration, 0.0, false, true)
self.playerPlaybackStatePromise.set(.single(self.playerPlaybackState))
}
@ -308,11 +318,11 @@ public final class MediaEditor {
print("error")
}

let textureSource: Signal<(TextureSource, UIImage?, AVPlayer?, UIColor, UIColor), NoError>
let textureSource: Signal<(TextureSource, UIImage?, AVPlayer?, AVPlayer?, UIColor, UIColor), NoError>
switch subject {
case let .image(image, _):
let colors = mediaEditorGetGradientColors(from: image)
textureSource = .single((ImageTextureSource(image: image, renderTarget: renderTarget), image, nil, colors.0, colors.1))
textureSource = .single((ImageTextureSource(image: image, renderTarget: renderTarget), image, nil, nil, colors.0, colors.1))
case let .draft(draft):
if draft.isVideo {
textureSource = Signal { subscriber in
@ -325,7 +335,7 @@ public final class MediaEditor {

if let gradientColors = draft.values.gradientColors {
let colors = (gradientColors.first!, gradientColors.last!)
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, colors.0, colors.1))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: nil, mirror: false, renderTarget: renderTarget), nil, player, nil, colors.0, colors.1))
subscriber.putCompletion()

return EmptyDisposable
@ -336,9 +346,9 @@ public final class MediaEditor {
imageGenerator.generateCGImagesAsynchronously(forTimes: [NSValue(time: CMTime(seconds: 0, preferredTimescale: CMTimeScale(30.0)))]) { _, image, _, _, _ in
if let image {
let colors = mediaEditorGetGradientColors(from: UIImage(cgImage: image))
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, colors.0, colors.1))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: nil, mirror: false, renderTarget: renderTarget), nil, player, nil, colors.0, colors.1))
} else {
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, .black, .black))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: nil, mirror: false, renderTarget: renderTarget), nil, player, nil, .black, .black))
}
subscriber.putCompletion()
}
@ -357,19 +367,24 @@ public final class MediaEditor {
} else {
colors = mediaEditorGetGradientColors(from: image)
}
textureSource = .single((ImageTextureSource(image: image, renderTarget: renderTarget), image, nil, colors.0, colors.1))
textureSource = .single((ImageTextureSource(image: image, renderTarget: renderTarget), image, nil, nil, colors.0, colors.1))
}
case let .video(path, transitionImage, _, _):
case let .video(path, transitionImage, mirror, additionalPath, _, _):
textureSource = Signal { subscriber in
let url = URL(fileURLWithPath: path)
let asset = AVURLAsset(url: url)

let playerItem = AVPlayerItem(asset: asset)
let player = AVPlayer(playerItem: playerItem)
let asset = AVURLAsset(url: URL(fileURLWithPath: path))
let player = AVPlayer(playerItem: AVPlayerItem(asset: asset))
player.automaticallyWaitsToMinimizeStalling = false

var additionalPlayer: AVPlayer?
if let additionalPath {
let additionalAsset = AVURLAsset(url: URL(fileURLWithPath: additionalPath))
additionalPlayer = AVPlayer(playerItem: AVPlayerItem(asset: additionalAsset))
additionalPlayer?.automaticallyWaitsToMinimizeStalling = false
}

if let transitionImage {
let colors = mediaEditorGetGradientColors(from: transitionImage)
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, colors.0, colors.1))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: additionalPlayer, mirror: mirror, renderTarget: renderTarget), nil, player, additionalPlayer, colors.0, colors.1))
subscriber.putCompletion()

return EmptyDisposable
@ -380,9 +395,9 @@ public final class MediaEditor {
imageGenerator.generateCGImagesAsynchronously(forTimes: [NSValue(time: CMTime(seconds: 0, preferredTimescale: CMTimeScale(30.0)))]) { _, image, _, _, _ in
if let image {
let colors = mediaEditorGetGradientColors(from: UIImage(cgImage: image))
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, colors.0, colors.1))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: additionalPlayer, mirror: mirror, renderTarget: renderTarget), nil, player, additionalPlayer, colors.0, colors.1))
} else {
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, .black, .black))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: additionalPlayer, mirror: mirror, renderTarget: renderTarget), nil, player, additionalPlayer, .black, .black))
}
subscriber.putCompletion()
}
@ -410,7 +425,7 @@ public final class MediaEditor {
let playerItem = AVPlayerItem(asset: asset)
let player = AVPlayer(playerItem: playerItem)
player.automaticallyWaitsToMinimizeStalling = false
subscriber.putNext((VideoTextureSource(player: player, renderTarget: renderTarget), nil, player, colors.0, colors.1))
subscriber.putNext((VideoTextureSource(player: player, additionalPlayer: nil, mirror: false, renderTarget: renderTarget), nil, player, nil, colors.0, colors.1))
subscriber.putCompletion()
}
})
@ -436,7 +451,7 @@ public final class MediaEditor {
}
if !degraded {
let colors = mediaEditorGetGradientColors(from: image)
subscriber.putNext((ImageTextureSource(image: image, renderTarget: renderTarget), image, nil, colors.0, colors.1))
subscriber.putNext((ImageTextureSource(image: image, renderTarget: renderTarget), image, nil, nil, colors.0, colors.1))
subscriber.putCompletion()
}
}
@ -451,12 +466,14 @@ public final class MediaEditor {
self.textureSourceDisposable = (textureSource
|> deliverOnMainQueue).start(next: { [weak self] sourceAndColors in
if let self {
let (source, image, player, topColor, bottomColor) = sourceAndColors
let (source, image, player, additionalPlayer, topColor, bottomColor) = sourceAndColors
self.renderer.onNextRender = { [weak self] in
self?.onFirstDisplay()
}
self.renderer.textureSource = source
self.player = player
self.additionalPlayer = additionalPlayer

self.playerPromise.set(.single(player))
self.gradientColorsValue = (topColor, bottomColor)
self.setGradientColors([topColor, bottomColor])
@ -485,13 +502,16 @@ public final class MediaEditor {
if let self {
let start = self.values.videoTrimRange?.lowerBound ?? 0.0
self.player?.seek(to: CMTime(seconds: start, preferredTimescale: CMTimeScale(1000)))
self.additionalPlayer?.seek(to: CMTime(seconds: start, preferredTimescale: CMTimeScale(1000)))
self.onPlaybackAction(.seek(start))
self.player?.play()
self.additionalPlayer?.play()
self.onPlaybackAction(.play)
}
})
Queue.mainQueue().justDispatch {
player.playImmediately(atRate: 1.0)
additionalPlayer?.playImmediately(atRate: 1.0)
self.onPlaybackAction(.play)
self.volumeFade = self.player?.fadeVolume(from: 0.0, to: 1.0, duration: 0.4)
}
@ -510,18 +530,29 @@ public final class MediaEditor {
}

private var skipRendering = false
private func updateValues(skipRendering: Bool = false, _ f: (MediaEditorValues) -> MediaEditorValues) {
if skipRendering {
private var forceRendering = false

private enum UpdateMode {
case generic
case skipRendering
case forceRendering
}
private func updateValues(mode: UpdateMode = .generic, _ f: (MediaEditorValues) -> MediaEditorValues) {
if case .skipRendering = mode {
self.skipRendering = true
} else if case .forceRendering = mode {
self.forceRendering = true
}
self.values = f(self.values)
if skipRendering {
if case .skipRendering = mode {
self.skipRendering = false
} else if case .forceRendering = mode {
self.forceRendering = false
}
}

public func setCrop(offset: CGPoint, scale: CGFloat, rotation: CGFloat, mirroring: Bool) {
self.updateValues(skipRendering: true) { values in
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedCrop(offset: offset, scale: scale, rotation: rotation, mirroring: mirroring)
}
}
@ -546,13 +577,13 @@ public final class MediaEditor {

public func setVideoIsMuted(_ videoIsMuted: Bool) {
self.player?.isMuted = videoIsMuted
self.updateValues(skipRendering: true) { values in
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedVideoIsMuted(videoIsMuted)
}
}

public func setVideoIsFullHd(_ videoIsFullHd: Bool) {
self.updateValues(skipRendering: true) { values in
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedVideoIsFullHd(videoIsFullHd)
}
}
@ -575,6 +606,7 @@ public final class MediaEditor {
}
if !play {
player.pause()
self.additionalPlayer?.pause()
self.onPlaybackAction(.pause)
}
let targetPosition = CMTime(seconds: position, preferredTimescale: CMTimeScale(60.0))
@ -586,6 +618,7 @@ public final class MediaEditor {
}
if play {
player.play()
self.additionalPlayer?.play()
self.onPlaybackAction(.play)
}
}
@ -596,16 +629,19 @@ public final class MediaEditor {

public func play() {
self.player?.play()
self.additionalPlayer?.play()
self.onPlaybackAction(.play)
}

public func stop() {
self.player?.pause()
self.additionalPlayer?.pause()
self.onPlaybackAction(.pause)
}

public func invalidate() {
self.player?.pause()
self.additionalPlayer?.pause()
self.onPlaybackAction(.pause)
self.renderer.textureSource?.invalidate()
}
@ -625,27 +661,41 @@ public final class MediaEditor {
}
}
})
self.additionalPlayer?.seek(to: targetPosition, toleranceBefore: .zero, toleranceAfter: .zero)
self.onPlaybackAction(.seek(targetPosition.seconds))
}

public func setVideoTrimRange(_ trimRange: Range<Double>, apply: Bool) {
self.updateValues(skipRendering: true) { values in
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedVideoTrimRange(trimRange)
}

if apply {
self.player?.currentItem?.forwardPlaybackEndTime = CMTime(seconds: trimRange.upperBound, preferredTimescale: CMTimeScale(1000))
self.additionalPlayer?.currentItem?.forwardPlaybackEndTime = CMTime(seconds: trimRange.upperBound, preferredTimescale: CMTimeScale(1000))
}
}

public func setAdditionalVideo(_ path: String, positionChanges: [VideoPositionChange]) {
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedAdditionalVideo(path: path, positionChanges: positionChanges)
}
}

public func setAdditionalVideoPosition(_ position: CGPoint, scale: CGFloat, rotation: CGFloat) {
self.updateValues(mode: .forceRendering) { values in
return values.withUpdatedAdditionalVideo(position: position, scale: scale, rotation: rotation)
}
}

public func setDrawingAndEntities(data: Data?, image: UIImage?, entities: [CodableDrawingEntity]) {
self.updateValues(skipRendering: true) { values in
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedDrawingAndEntities(drawing: image, entities: entities)
}
}

public func setGradientColors(_ gradientColors: [UIColor]) {
self.updateValues(skipRendering: true) { values in
self.updateValues(mode: .skipRendering) { values in
return values.withUpdatedGradientColors(gradientColors: gradientColors)
}
}
@ -655,12 +705,14 @@ public final class MediaEditor {
private func updateRenderChain() {
self.renderer.renderPassedEnabled = !self.previewUnedited
self.renderChain.update(values: self.values)
if let player = self.player, player.rate > 0.0 {
self.renderer.videoFinishPass.update(values: self.values)

if let player = self.player, player.rate > 0.0 && !self.forceRendering {
} else {
let currentTime = CACurrentMediaTime()
if !self.scheduledUpdate {
let delay = 0.03333
if let previousUpdateTime = self.previousUpdateTime, currentTime - previousUpdateTime < delay {
let delay = self.forceRendering ? 0.0 : 0.03333
if let previousUpdateTime = self.previousUpdateTime, delay > 0.0, currentTime - previousUpdateTime < delay {
self.scheduledUpdate = true
Queue.mainQueue().after(delay - (currentTime - previousUpdateTime)) {
self.scheduledUpdate = false
@ -788,7 +840,11 @@ final class MediaEditorRenderChain {
self.adjustmentsPass.adjustments.vignette = 0.0
}
case .grain:
break
if let value = value as? Float {
self.adjustmentsPass.adjustments.grain = value
} else {
self.adjustmentsPass.adjustments.grain = 0.0
}
case .sharpen:
if let value = value as? Float {
self.sharpenPass.value = value
@ -834,16 +890,20 @@ final class MediaEditorRenderChain {
self.blurPass.value.rotation = Float(value.rotation)
}
case .curves:
var value = (value as? CurvesValue) ?? CurvesValue.initial
let allDataPoints = value.all.dataPoints
let redDataPoints = value.red.dataPoints
let greenDataPoints = value.green.dataPoints
let blueDataPoints = value.blue.dataPoints

self.adjustmentsPass.allCurve = allDataPoints
self.adjustmentsPass.redCurve = redDataPoints
self.adjustmentsPass.greenCurve = greenDataPoints
self.adjustmentsPass.blueCurve = blueDataPoints
if var value = value as? CurvesValue {
let allDataPoints = value.all.dataPoints
let redDataPoints = value.red.dataPoints
let greenDataPoints = value.green.dataPoints
let blueDataPoints = value.blue.dataPoints

self.adjustmentsPass.adjustments.hasCurves = 1.0
self.adjustmentsPass.allCurve = allDataPoints
self.adjustmentsPass.redCurve = redDataPoints
self.adjustmentsPass.greenCurve = greenDataPoints
self.adjustmentsPass.blueCurve = blueDataPoints
} else {
self.adjustmentsPass.adjustments.hasCurves = 0.0
}
}
}
}

@ -92,16 +92,22 @@ final class MediaEditorComposer {

self.renderer.setupForComposer(composer: self)
self.renderChain.update(values: self.values)
self.renderer.videoFinishPass.update(values: self.values)
}

func processSampleBuffer(_ sampleBuffer: CMSampleBuffer, pool: CVPixelBufferPool?, textureRotation: TextureRotation, completion: @escaping (CVPixelBuffer?) -> Void) {
func processSampleBuffer(sampleBuffer: CMSampleBuffer, textureRotation: TextureRotation, additionalSampleBuffer: CMSampleBuffer?, additionalTextureRotation: TextureRotation, pool: CVPixelBufferPool?, completion: @escaping (CVPixelBuffer?) -> Void) {
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer), let pool = pool else {
completion(nil)
return
}
let time = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)

self.renderer.consumeVideoPixelBuffer(imageBuffer, rotation: textureRotation, timestamp: time, render: true)
let mainPixelBuffer = VideoPixelBuffer(pixelBuffer: imageBuffer, rotation: textureRotation, timestamp: time)
var additionalPixelBuffer: VideoPixelBuffer?
if let additionalSampleBuffer, let additionalImageBuffer = CMSampleBufferGetImageBuffer(additionalSampleBuffer) {
additionalPixelBuffer = VideoPixelBuffer(pixelBuffer: additionalImageBuffer, rotation: additionalTextureRotation, timestamp: time)
}
self.renderer.consumeVideoPixelBuffer(pixelBuffer: mainPixelBuffer, additionalPixelBuffer: additionalPixelBuffer, render: true)

if let finalTexture = self.renderer.finalTexture, var ciImage = CIImage(mtlTexture: finalTexture, options: [.colorSpace: self.colorSpace]) {
ciImage = ciImage.transformed(by: CGAffineTransformMakeScale(1.0, -1.0).translatedBy(x: 0.0, y: -ciImage.extent.height))

@ -20,8 +20,10 @@ func composerEntitiesForDrawingEntity(account: Account, entity: DrawingEntity, c
content = .file(file)
case let .image(image):
content = .image(image)
case let .video(path, _):
case let .video(path, _, _):
content = .video(path)
case .dualVideoReference:
return []
}
return [MediaEditorComposerStickerEntity(account: account, content: content, position: entity.position, scale: entity.scale, rotation: entity.rotation, baseSize: entity.baseSize, mirrored: entity.mirrored, colorSpace: colorSpace)]
} else if let renderImage = entity.renderImage, let image = CIImage(image: renderImage, options: [.colorSpace: colorSpace]) {
@ -269,6 +271,7 @@ private class MediaEditorComposerStickerEntity: MediaEditorComposerEntity {

let processFrame: (Double?, Int?, Int?, (Int) -> AnimatedStickerFrame?) -> Void = { [weak self] duration, frameCount, frameRate, takeFrame in
guard let strongSelf = self else {
completion(nil)
return
}
var frameAdvancement: Int = 0
@ -5,9 +5,25 @@ import MetalKit
import Photos
import SwiftSignalKit

final class VideoPixelBuffer {
let pixelBuffer: CVPixelBuffer
let rotation: TextureRotation
let timestamp: CMTime

init(
pixelBuffer: CVPixelBuffer,
rotation: TextureRotation,
timestamp: CMTime
) {
self.pixelBuffer = pixelBuffer
self.rotation = rotation
self.timestamp = timestamp
}
}

protocol TextureConsumer: AnyObject {
func consumeTexture(_ texture: MTLTexture, render: Bool)
func consumeVideoPixelBuffer(_ pixelBuffer: CVPixelBuffer, rotation: TextureRotation, timestamp: CMTime, render: Bool)
func consumeVideoPixelBuffer(pixelBuffer: VideoPixelBuffer, additionalPixelBuffer: VideoPixelBuffer?, render: Bool)
}

final class RenderingContext {
@ -51,10 +67,13 @@ final class MediaEditorRenderer: TextureConsumer {
}
}

var semaphore = DispatchSemaphore(value: 3)
private var semaphore = DispatchSemaphore(value: 3)
private var renderPasses: [RenderPass] = []

private let videoInputPass = VideoInputPass()
private let additionalVideoInputPass = VideoInputPass()
let videoFinishPass = VideoInputScalePass()

private let outputRenderPass = OutputRenderPass()
private weak var renderTarget: RenderTarget? {
didSet {
@ -68,7 +87,8 @@ final class MediaEditorRenderer: TextureConsumer {
private var textureCache: CVMetalTextureCache?

private var currentTexture: MTLTexture?
private var currentPixelBuffer: (CVPixelBuffer, TextureRotation)?
private var currentPixelBuffer: VideoPixelBuffer?
private var currentAdditionalPixelBuffer: VideoPixelBuffer?

public var onNextRender: (() -> Void)?

@ -120,6 +140,8 @@ final class MediaEditorRenderer: TextureConsumer {
self.commandQueue = device.makeCommandQueue()
self.commandQueue?.label = "Media Editor Command Queue"
self.videoInputPass.setup(device: device, library: library)
self.additionalVideoInputPass.setup(device: device, library: library)
self.videoFinishPass.setup(device: device, library: library)
self.renderPasses.forEach { $0.setup(device: device, library: library) }
self.outputRenderPass.setup(device: device, library: library)
}
@ -147,11 +169,15 @@ final class MediaEditorRenderer: TextureConsumer {
self.commandQueue = device.makeCommandQueue()
self.commandQueue?.label = "Media Editor Command Queue"
self.videoInputPass.setup(device: device, library: library)
self.additionalVideoInputPass.setup(device: device, library: library)
self.videoFinishPass.setup(device: device, library: library)
self.renderPasses.forEach { $0.setup(device: device, library: library) }
}

var renderPassedEnabled = true

var needsDisplay = false

func renderFrame() {
let device: MTLDevice?
if let renderTarget = self.renderTarget {
@ -164,22 +190,32 @@ final class MediaEditorRenderer: TextureConsumer {
guard let device = device,
let commandQueue = self.commandQueue,
let textureCache = self.textureCache else {
self.semaphore.signal()
self.didRenderFrame()
return
}

guard let commandBuffer = commandQueue.makeCommandBuffer() else {
self.semaphore.signal()
self.didRenderFrame()
return
}

var texture: MTLTexture
if let currentTexture = self.currentTexture {
texture = currentTexture
} else if let (currentPixelBuffer, textureRotation) = self.currentPixelBuffer, let videoTexture = self.videoInputPass.processPixelBuffer(currentPixelBuffer, rotation: textureRotation, textureCache: textureCache, device: device, commandBuffer: commandBuffer) {
texture = videoTexture
} else if let currentPixelBuffer = self.currentPixelBuffer, let currentAdditionalPixelBuffer = self.currentAdditionalPixelBuffer, let videoTexture = self.videoInputPass.processPixelBuffer(currentPixelBuffer, textureCache: textureCache, device: device, commandBuffer: commandBuffer), let additionalVideoTexture = self.additionalVideoInputPass.processPixelBuffer(currentAdditionalPixelBuffer, textureCache: textureCache, device: device, commandBuffer: commandBuffer) {
if let result = self.videoFinishPass.process(input: videoTexture, secondInput: additionalVideoTexture, timestamp: currentPixelBuffer.timestamp, device: device, commandBuffer: commandBuffer) {
texture = result
} else {
texture = videoTexture
}
} else if let currentPixelBuffer = self.currentPixelBuffer, let videoTexture = self.videoInputPass.processPixelBuffer(currentPixelBuffer, textureCache: textureCache, device: device, commandBuffer: commandBuffer) {
if let result = self.videoFinishPass.process(input: videoTexture, secondInput: nil, timestamp: currentPixelBuffer.timestamp, device: device, commandBuffer: commandBuffer) {
texture = result
} else {
texture = videoTexture
}
} else {
self.semaphore.signal()
self.didRenderFrame()
return
}

@ -192,17 +228,22 @@ final class MediaEditorRenderer: TextureConsumer {
}
self.finalTexture = texture

commandBuffer.addCompletedHandler { [weak self] _ in
if let self {
if self.renderTarget == nil {
self.semaphore.signal()
if self.renderTarget == nil {
commandBuffer.addCompletedHandler { [weak self] _ in
if let self {
self.didRenderFrame()
}
}
}
commandBuffer.commit()

if let renderTarget = self.renderTarget {
renderTarget.redraw()
if self.needsDisplay {
self.didRenderFrame()
} else {
self.needsDisplay = true
renderTarget.redraw()
}
} else {
commandBuffer.waitUntilCompleted()
}
@ -215,13 +256,13 @@ final class MediaEditorRenderer: TextureConsumer {
let commandBuffer = commandQueue.makeCommandBuffer(),
let texture = self.finalTexture
else {
self.semaphore.signal()
self.needsDisplay = false
self.didRenderFrame()
return
}

commandBuffer.addCompletedHandler { [weak self] _ in
if let self {
self.semaphore.signal()
self.didRenderFrame()

if let onNextRender = self.onNextRender {
self.onNextRender = nil
@ -235,15 +276,21 @@ final class MediaEditorRenderer: TextureConsumer {
self.outputRenderPass.process(input: texture, device: device, commandBuffer: commandBuffer)

commandBuffer.commit()
self.needsDisplay = false
}

func willRenderFrame() {
let _ = self.semaphore.wait(timeout: .distantFuture)
let timeout = self.renderTarget != nil ? DispatchTime.now() + 0.1 : .distantFuture
let _ = self.semaphore.wait(timeout: timeout)
}

func didRenderFrame() {
self.semaphore.signal()
}

func consumeTexture(_ texture: MTLTexture, render: Bool) {
if render {
let _ = self.semaphore.wait(timeout: .distantFuture)
self.willRenderFrame()
}

self.currentTexture = texture
@ -253,18 +300,19 @@ final class MediaEditorRenderer: TextureConsumer {
}

var previousPresentationTimestamp: CMTime?
func consumeVideoPixelBuffer(_ pixelBuffer: CVPixelBuffer, rotation: TextureRotation, timestamp: CMTime, render: Bool) {
let _ = self.semaphore.wait(timeout: .distantFuture)
func consumeVideoPixelBuffer(pixelBuffer: VideoPixelBuffer, additionalPixelBuffer: VideoPixelBuffer?, render: Bool) {
self.willRenderFrame()

self.currentPixelBuffer = (pixelBuffer, rotation)
self.currentPixelBuffer = pixelBuffer
self.currentAdditionalPixelBuffer = additionalPixelBuffer
if render {
if self.previousPresentationTimestamp == timestamp {
self.semaphore.signal()
if self.previousPresentationTimestamp == pixelBuffer.timestamp {
self.didRenderFrame()
} else {
self.renderFrame()
}
}
self.previousPresentationTimestamp = timestamp
self.previousPresentationTimestamp = pixelBuffer.timestamp
}

func renderTargetDidChange(_ target: RenderTarget?) {

@ -37,6 +37,21 @@ public enum EditorToolKey: Int32, CaseIterable {
]
}

public struct VideoPositionChange: Codable, Equatable {
private enum CodingKeys: String, CodingKey {
case additional
case timestamp
}

public let additional: Bool
public let timestamp: Double

public init(additional: Bool, timestamp: Double) {
self.additional = additional
self.timestamp = timestamp
}
}

public final class MediaEditorValues: Codable, Equatable {
public static func == (lhs: MediaEditorValues, rhs: MediaEditorValues) -> Bool {
if lhs.originalDimensions != rhs.originalDimensions {
@ -69,13 +84,27 @@ public final class MediaEditorValues: Codable, Equatable {
if lhs.videoIsFullHd != rhs.videoIsFullHd {
return false
}
if lhs.additionalVideoPath != rhs.additionalVideoPath {
return false
}
if lhs.additionalVideoPosition != rhs.additionalVideoPosition {
return false
}
if lhs.additionalVideoScale != rhs.additionalVideoScale {
return false
}
if lhs.additionalVideoRotation != rhs.additionalVideoRotation {
return false
}
if lhs.additionalVideoPositionChanges != rhs.additionalVideoPositionChanges {
return false
}
if lhs.drawing !== rhs.drawing {
return false
}
if lhs.entities != rhs.entities {
return false
}

for key in EditorToolKey.allCases {
let lhsToolValue = lhs.toolValues[key]
@ -115,6 +144,12 @@ public final class MediaEditorValues: Codable, Equatable {
case videoIsMuted
case videoIsFullHd

case additionalVideoPath
case additionalVideoPosition
case additionalVideoScale
case additionalVideoRotation
case additionalVideoPositionChanges

case drawing
case entities
case toolValues
@ -133,6 +168,12 @@ public final class MediaEditorValues: Codable, Equatable {
public let videoIsMuted: Bool
public let videoIsFullHd: Bool

public let additionalVideoPath: String?
public let additionalVideoPosition: CGPoint?
public let additionalVideoScale: CGFloat?
public let additionalVideoRotation: CGFloat?
public let additionalVideoPositionChanges: [VideoPositionChange]

public let drawing: UIImage?
public let entities: [CodableDrawingEntity]
public let toolValues: [EditorToolKey: Any]
@ -148,6 +189,11 @@ public final class MediaEditorValues: Codable, Equatable {
videoTrimRange: Range<Double>?,
videoIsMuted: Bool,
videoIsFullHd: Bool,
additionalVideoPath: String?,
additionalVideoPosition: CGPoint?,
additionalVideoScale: CGFloat?,
additionalVideoRotation: CGFloat?,
additionalVideoPositionChanges: [VideoPositionChange],
drawing: UIImage?,
entities: [CodableDrawingEntity],
toolValues: [EditorToolKey: Any]
@ -162,6 +208,11 @@ public final class MediaEditorValues: Codable, Equatable {
self.videoTrimRange = videoTrimRange
self.videoIsMuted = videoIsMuted
self.videoIsFullHd = videoIsFullHd
self.additionalVideoPath = additionalVideoPath
self.additionalVideoPosition = additionalVideoPosition
self.additionalVideoScale = additionalVideoScale
self.additionalVideoRotation = additionalVideoRotation
self.additionalVideoPositionChanges = additionalVideoPositionChanges
self.drawing = drawing
self.entities = entities
self.toolValues = toolValues
@ -190,6 +241,12 @@ public final class MediaEditorValues: Codable, Equatable {
self.videoIsMuted = try container.decode(Bool.self, forKey: .videoIsMuted)
self.videoIsFullHd = try container.decodeIfPresent(Bool.self, forKey: .videoIsFullHd) ?? false

self.additionalVideoPath = try container.decodeIfPresent(String.self, forKey: .additionalVideoPath)
self.additionalVideoPosition = try container.decodeIfPresent(CGPoint.self, forKey: .additionalVideoPosition)
self.additionalVideoScale = try container.decodeIfPresent(CGFloat.self, forKey: .additionalVideoScale)
self.additionalVideoRotation = try container.decodeIfPresent(CGFloat.self, forKey: .additionalVideoRotation)
self.additionalVideoPositionChanges = try container.decodeIfPresent([VideoPositionChange].self, forKey: .additionalVideoPositionChanges) ?? []

if let drawingData = try container.decodeIfPresent(Data.self, forKey: .drawing), let image = UIImage(data: drawingData) {
self.drawing = image
} else {
@ -227,6 +284,12 @@ public final class MediaEditorValues: Codable, Equatable {
try container.encode(self.videoIsMuted, forKey: .videoIsMuted)
try container.encode(self.videoIsFullHd, forKey: .videoIsFullHd)

try container.encodeIfPresent(self.additionalVideoPath, forKey: .additionalVideoPath)
try container.encodeIfPresent(self.additionalVideoPosition, forKey: .additionalVideoPosition)
try container.encodeIfPresent(self.additionalVideoScale, forKey: .additionalVideoScale)
try container.encodeIfPresent(self.additionalVideoRotation, forKey: .additionalVideoRotation)
try container.encodeIfPresent(self.additionalVideoPositionChanges, forKey: .additionalVideoPositionChanges)

if let drawing = self.drawing, let pngDrawingData = drawing.pngData() {
try container.encode(pngDrawingData, forKey: .drawing)
}
@ -243,35 +306,43 @@ public final class MediaEditorValues: Codable, Equatable {
}

public func makeCopy() -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedCrop(offset: CGPoint, scale: CGFloat, rotation: CGFloat, mirroring: Bool) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: offset, cropSize: self.cropSize, cropScale: scale, cropRotation: rotation, cropMirroring: mirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: offset, cropSize: self.cropSize, cropScale: scale, cropRotation: rotation, cropMirroring: mirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedGradientColors(gradientColors: [UIColor]) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedVideoIsMuted(_ videoIsMuted: Bool) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedVideoIsFullHd(_ videoIsFullHd: Bool) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedAdditionalVideo(path: String, positionChanges: [VideoPositionChange]) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: path, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: positionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedAdditionalVideo(position: CGPoint, scale: CGFloat, rotation: CGFloat) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: position, additionalVideoScale: scale, additionalVideoRotation: rotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedVideoTrimRange(_ videoTrimRange: Range<Double>) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: self.toolValues)
}

func withUpdatedDrawingAndEntities(drawing: UIImage?, entities: [CodableDrawingEntity]) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: drawing, entities: entities, toolValues: self.toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: drawing, entities: entities, toolValues: self.toolValues)
}

func withUpdatedToolValues(_ toolValues: [EditorToolKey: Any]) -> MediaEditorValues {
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, drawing: self.drawing, entities: self.entities, toolValues: toolValues)
return MediaEditorValues(originalDimensions: self.originalDimensions, cropOffset: self.cropOffset, cropSize: self.cropSize, cropScale: self.cropScale, cropRotation: self.cropRotation, cropMirroring: self.cropMirroring, gradientColors: self.gradientColors, videoTrimRange: self.videoTrimRange, videoIsMuted: self.videoIsMuted, videoIsFullHd: self.videoIsFullHd, additionalVideoPath: self.additionalVideoPath, additionalVideoPosition: self.additionalVideoPosition, additionalVideoScale: self.additionalVideoScale, additionalVideoRotation: self.additionalVideoRotation, additionalVideoPositionChanges: self.additionalVideoPositionChanges, drawing: self.drawing, entities: self.entities, toolValues: toolValues)
}

public var resultDimensions: PixelDimensions {
@ -558,7 +629,8 @@ public struct CurvesValue: Equatable, Codable {
},
size: CGSize(width: 1.0, height: 1.0),
type: .line,
granularity: 100
granularity: 100,
floor: false
)
return dataPoints
}()
@ -885,7 +957,7 @@ public enum MediaEditorCurveType {
case line
}

public func curveThroughPoints(count: Int, valueAtIndex: (Int) -> Float, positionAtIndex: (Int, CGFloat) -> CGFloat, size: CGSize, type: MediaEditorCurveType, granularity: Int) -> (UIBezierPath, [Float]) {
public func curveThroughPoints(count: Int, valueAtIndex: (Int) -> Float, positionAtIndex: (Int, CGFloat) -> CGFloat, size: CGSize, type: MediaEditorCurveType, granularity: Int, floor: Bool) -> (UIBezierPath, [Float]) {
let path = UIBezierPath()
var dataPoints: [Float] = []

@ -900,7 +972,11 @@ public func curveThroughPoints(count: Int, valueAtIndex: (Int) -> Float, positio

let step = size.width / CGFloat(count)
func pointAtIndex(_ index: Int) -> CGPoint {
return CGPoint(x: floorToScreenPixels(positionAtIndex(index, step)), y: floorToScreenPixels(CGFloat(valueAtIndex(index)) * size.height))
if floor {
return CGPoint(x: floorToScreenPixels(positionAtIndex(index, step)), y: floorToScreenPixels(CGFloat(valueAtIndex(index)) * size.height))
} else {
return CGPoint(x: positionAtIndex(index, step), y: CGFloat(valueAtIndex(index)) * size.height)
}
}

for index in 1 ..< count - 2 {
@ -923,7 +999,7 @@ public func curveThroughPoints(count: Int, valueAtIndex: (Int) -> Float, positio
path.addLine(to: point)
}

if ((index - 1) % 2 == 0) {
if ((j - 1) % 2 == 0) {
dataPoints.append(Float(point.y))
}
}

@ -47,12 +47,16 @@ public final class MediaEditorVideoAVAssetWriter: MediaEditorVideoExportWriter {
private var adaptor: AVAssetWriterInputPixelBufferAdaptor!

func setup(configuration: MediaEditorVideoExport.Configuration, outputPath: String) {
Logger.shared.log("VideoExport", "Will setup asset writer")

let url = URL(fileURLWithPath: outputPath)
self.writer = try? AVAssetWriter(url: url, fileType: .mp4)
guard let writer = self.writer else {
return
}
writer.shouldOptimizeForNetworkUse = configuration.shouldOptimizeForNetworkUse

Logger.shared.log("VideoExport", "Did setup asset writer")
}

func setupVideoInput(configuration: MediaEditorVideoExport.Configuration, sourceFrameRate: Float) {
@ -60,6 +64,8 @@ public final class MediaEditorVideoAVAssetWriter: MediaEditorVideoExportWriter {
return
}

Logger.shared.log("VideoExport", "Will setup video input")

var videoSettings = configuration.videoSettings
if var compressionSettings = videoSettings[AVVideoCompressionPropertiesKey] as? [String: Any] {
compressionSettings[AVVideoExpectedSourceFrameRateKey] = sourceFrameRate
@ -78,6 +84,8 @@ public final class MediaEditorVideoAVAssetWriter: MediaEditorVideoExportWriter {

if writer.canAdd(videoInput) {
writer.add(videoInput)
} else {
Logger.shared.log("VideoExport", "Failed to add video input")
}
self.videoInput = videoInput
}
@ -250,15 +258,21 @@ public final class MediaEditorVideoExport {
private let outputPath: String

private var reader: AVAssetReader?
private var additionalReader: AVAssetReader?

private var videoOutput: AVAssetReaderOutput?
private var audioOutput: AVAssetReaderAudioMixOutput?
private var textureRotation: TextureRotation = .rotate0Degrees

private var additionalVideoOutput: AVAssetReaderOutput?
private var additionalTextureRotation: TextureRotation = .rotate0Degrees

private let queue = Queue()

private var writer: MediaEditorVideoExportWriter?
private var composer: MediaEditorComposer?

private var textureRotation: TextureRotation = .rotate0Degrees

private let duration = ValuePromise<CMTime>()
private var durationValue: CMTime? {
didSet {
@ -312,7 +326,11 @@ public final class MediaEditorVideoExport {

switch self.subject {
case let .video(asset):
self.setupWithAsset(asset)
var additionalAsset: AVAsset?
if let additionalPath = self.configuration.values.additionalVideoPath {
additionalAsset = AVURLAsset(url: URL(fileURLWithPath: additionalPath))
}
self.setupWithAsset(asset, additionalAsset: additionalAsset)
case let .image(image):
self.setupWithImage(image)
}
@ -325,26 +343,31 @@ public final class MediaEditorVideoExport {
self.composer = MediaEditorComposer(account: self.account, values: self.configuration.values, dimensions: self.configuration.composerDimensions, outputDimensions: self.configuration.dimensions)
}

private func setupWithAsset(_ asset: AVAsset) {
private func setupWithAsset(_ asset: AVAsset, additionalAsset: AVAsset?) {
self.reader = try? AVAssetReader(asset: asset)
self.textureRotation = textureRotatonForAVAsset(asset)

if let additionalAsset {
self.additionalReader = try? AVAssetReader(asset: additionalAsset)
self.additionalTextureRotation = textureRotatonForAVAsset(additionalAsset)
}
guard let reader = self.reader else {
return
}
if let timeRange = self.configuration.timeRange {
reader.timeRange = timeRange
self.additionalReader?.timeRange = timeRange
}

self.writer = MediaEditorVideoAVAssetWriter()
guard let writer = self.writer else {
return
}

self.textureRotation = textureRotatonForAVAsset(asset)

writer.setup(configuration: self.configuration, outputPath: self.outputPath)

let videoTracks = asset.tracks(withMediaType: .video)
if (videoTracks.count > 0) {
let additionalVideoTracks = additionalAsset?.tracks(withMediaType: .video)
if videoTracks.count > 0 {
var sourceFrameRate: Float = 0.0
let colorProperties: [String: Any] = [
AVVideoColorPrimariesKey: AVVideoColorPrimaries_ITU_R_709_2,
@ -357,7 +380,7 @@ public final class MediaEditorVideoExport {
kCVPixelBufferMetalCompatibilityKey as String: true,
AVVideoColorPropertiesKey: colorProperties
]
if let videoTrack = videoTracks.first, videoTrack.preferredTransform.isIdentity && !self.configuration.values.requiresComposing {
if let videoTrack = videoTracks.first, videoTrack.preferredTransform.isIdentity && !self.configuration.values.requiresComposing && additionalAsset == nil {
} else {
self.setupComposer()
}
@ -371,6 +394,15 @@ public final class MediaEditorVideoExport {
}
self.videoOutput = videoOutput

if let additionalReader = self.additionalReader, let additionalVideoTrack = additionalVideoTracks?.first {
let additionalVideoOutput = AVAssetReaderTrackOutput(track: additionalVideoTrack, outputSettings: outputSettings)
additionalVideoOutput.alwaysCopiesSampleData = true
if additionalReader.canAdd(additionalVideoOutput) {
additionalReader.add(additionalVideoOutput)
}
self.additionalVideoOutput = additionalVideoOutput
}

if let videoTrack = videoTracks.first {
if videoTrack.nominalFrameRate > 0.0 {
sourceFrameRate = videoTrack.nominalFrameRate
@ -411,6 +443,8 @@ public final class MediaEditorVideoExport {
}

private func setupWithImage(_ image: UIImage) {
Logger.shared.log("VideoExport", "Setup with image")

self.setupComposer()

self.writer = MediaEditorVideoAVAssetWriter()
@ -491,7 +525,7 @@ public final class MediaEditorVideoExport {
guard let writer = self.writer, let composer = self.composer, case let .image(image) = self.subject else {
return false
}

let duration: Double = 5.0
let frameRate: Double = Double(self.configuration.frameRate)
var position: CMTime = CMTime(value: 0, timescale: Int32(self.configuration.frameRate))
@ -545,22 +579,25 @@ public final class MediaEditorVideoExport {
return false
}
self.pauseDispatchGroup.wait()
if let buffer = output.copyNextSampleBuffer() {
let timestamp = CMSampleBufferGetPresentationTimeStamp(buffer)
if let sampleBuffer = output.copyNextSampleBuffer() {
let timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer)
if let duration = self.durationValue {
let startTimestamp = self.reader?.timeRange.start ?? .zero
let progress = (timestamp - startTimestamp).seconds / duration.seconds
self.statusValue = .progress(Float(progress))
}

let additionalSampleBuffer = self.additionalVideoOutput?.copyNextSampleBuffer()

if let composer = self.composer {
composer.processSampleBuffer(buffer, pool: writer.pixelBufferPool, textureRotation: self.textureRotation, completion: { pixelBuffer in
composer.processSampleBuffer(sampleBuffer: sampleBuffer, textureRotation: self.textureRotation, additionalSampleBuffer: additionalSampleBuffer, additionalTextureRotation: self.additionalTextureRotation, pool: writer.pixelBufferPool, completion: { pixelBuffer in
if let pixelBuffer {
if !writer.appendPixelBuffer(pixelBuffer, at: timestamp) {
writer.markVideoAsFinished()
appendFailed = true
}
} else {
if !writer.appendVideoBuffer(buffer) {
if !writer.appendVideoBuffer(sampleBuffer) {
writer.markVideoAsFinished()
appendFailed = true
}
@ -569,7 +606,7 @@ public final class MediaEditorVideoExport {
})
self.semaphore.wait()
} else {
if !writer.appendVideoBuffer(buffer) {
if !writer.appendVideoBuffer(sampleBuffer) {
writer.markVideoAsFinished()
return false
}
@ -646,12 +683,16 @@ public final class MediaEditorVideoExport {
}

private func startImageVideoExport() {
Logger.shared.log("VideoExport", "Starting image video export")

guard self.internalStatus == .idle, let writer = self.writer else {
Logger.shared.log("VideoExport", "Failed on writer state")
self.statusValue = .failed(.invalid)
return
}

guard writer.startWriting() else {
Logger.shared.log("VideoExport", "Failed on start writing")
self.statusValue = .failed(.writing(nil))
return
}
@ -685,6 +726,11 @@ public final class MediaEditorVideoExport {
return
}

if let additionalReader = self.additionalReader, !additionalReader.startReading() {
self.statusValue = .failed(.reading(nil))
return
}

self.internalStatus = .exporting

writer.startSession(atSourceTime: self.configuration.timeRange?.start ?? .zero)

@ -3,9 +3,10 @@ import QuartzCore
import Metal
import simd

fileprivate struct VertexData {
struct VertexData {
let pos: simd_float4
let texCoord: simd_float2
let localPos: simd_float2
}

enum TextureRotation: Int {
@ -13,9 +14,10 @@ enum TextureRotation: Int {
case rotate90Degrees
case rotate180Degrees
case rotate270Degrees
case rotate90DegreesMirrored
}

private func verticesDataForRotation(_ rotation: TextureRotation) -> [VertexData] {
func verticesDataForRotation(_ rotation: TextureRotation, rect: CGRect = CGRect(x: -0.5, y: -0.5, width: 1.0, height: 1.0), z: Float = 0.0) -> [VertexData] {
let topLeft: simd_float2
let topRight: simd_float2
let bottomLeft: simd_float2
@ -37,6 +39,11 @@ private func verticesDataForRotation(_ rotation: TextureRotation) -> [VertexData
|
||||
topRight = simd_float2(1.0, 0.0)
|
||||
bottomLeft = simd_float2(0.0, 1.0)
|
||||
bottomRight = simd_float2(0.0, 0.0)
|
||||
case .rotate90DegreesMirrored:
|
||||
topLeft = simd_float2(1.0, 0.0)
|
||||
topRight = simd_float2(1.0, 1.0)
|
||||
bottomLeft = simd_float2(0.0, 0.0)
|
||||
bottomRight = simd_float2(0.0, 1.0)
|
||||
case .rotate270Degrees:
|
||||
topLeft = simd_float2(0.0, 0.0)
|
||||
topRight = simd_float2(0.0, 1.0)
|
||||
@ -46,20 +53,24 @@ private func verticesDataForRotation(_ rotation: TextureRotation) -> [VertexData
|
||||
|
||||
return [
|
||||
VertexData(
|
||||
pos: simd_float4(x: -1, y: -1, z: 0, w: 1),
|
||||
texCoord: topLeft
|
||||
pos: simd_float4(x: Float(rect.minX) * 2.0, y: Float(rect.minY) * 2.0, z: z, w: 1),
|
||||
texCoord: topLeft,
|
||||
localPos: simd_float2(0.0, 0.0)
|
||||
),
|
||||
VertexData(
|
||||
pos: simd_float4(x: 1, y: -1, z: 0, w: 1),
|
||||
texCoord: topRight
|
||||
pos: simd_float4(x: Float(rect.maxX) * 2.0, y: Float(rect.minY) * 2.0, z: z, w: 1),
|
||||
texCoord: topRight,
|
||||
localPos: simd_float2(1.0, 0.0)
|
||||
),
|
||||
VertexData(
|
||||
pos: simd_float4(x: -1, y: 1, z: 0, w: 1),
|
||||
texCoord: bottomLeft
|
||||
pos: simd_float4(x: Float(rect.minX) * 2.0, y: Float(rect.maxY) * 2.0, z: z, w: 1),
|
||||
texCoord: bottomLeft,
|
||||
localPos: simd_float2(0.0, 1.0)
|
||||
),
|
||||
VertexData(
|
||||
pos: simd_float4(x: 1, y: 1, z: 0, w: 1),
|
||||
texCoord: bottomRight
|
||||
pos: simd_float4(x: Float(rect.maxX) * 2.0, y: Float(rect.maxY) * 2.0, z: z, w: 1),
|
||||
texCoord: bottomRight,
|
||||
localPos: simd_float2(1.0, 1.0)
|
||||
),
|
||||
]
|
||||
}
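Note on the hunk above: verticesDataForRotation now takes the quad to emit. rect is given in a unit coordinate space centered on the origin and each coordinate is multiplied by 2.0 to land in Metal clip space, so the default rect (x: -0.5, y: -0.5, width: 1.0, height: 1.0) reproduces the previous hard-coded corners at ±1. z gives each quad a depth value so the main, PIP and transition layers can be ordered, and the new localPos attribute provides a 0…1 coordinate within the quad, presumably for the rounded-corner/alpha mask of the dual-camera view. A small illustration (the rotation value is only an example; the second call mirrors the picture-in-picture setup later in this diff):

    let fullScreenQuad = verticesDataForRotation(.rotate0Degrees)
    let pipQuad = verticesDataForRotation(.rotate0Degrees, rect: CGRect(x: -0.5, y: -0.5, width: 0.5, height: 0.5), z: 0.5)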
|
||||
|
@ -3,7 +3,7 @@ import AVFoundation
|
||||
import Metal
|
||||
import MetalKit
|
||||
|
||||
func textureRotatonForAVAsset(_ asset: AVAsset) -> TextureRotation {
|
||||
func textureRotatonForAVAsset(_ asset: AVAsset, mirror: Bool = false) -> TextureRotation {
|
||||
for track in asset.tracks {
|
||||
if track.mediaType == .video {
|
||||
let t = track.preferredTransform
|
||||
@ -18,7 +18,7 @@ func textureRotatonForAVAsset(_ asset: AVAsset) -> TextureRotation {
|
||||
} else if t.a == 1.0 && t.d == -1.0 {
|
||||
return .rotate180Degrees
|
||||
} else {
|
||||
return .rotate90Degrees
|
||||
return mirror ? .rotate90DegreesMirrored : .rotate90Degrees
|
||||
}
|
||||
}
|
||||
}
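Note on the hunk above: the new mirror parameter only affects the common portrait (90°) case. It lets the additional (front-camera) video be shown mirrored, matching the camera preview, by flipping texture coordinates instead of transforming the asset. It is used later in this diff for the secondary player item:

    self.additionalTextureRotation = textureRotatonForAVAsset(additionalPlayerItem.asset, mirror: true)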
|
||||
@ -27,13 +27,20 @@ func textureRotatonForAVAsset(_ asset: AVAsset) -> TextureRotation {
|
||||
|
||||
final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullDelegate {
|
||||
private weak var player: AVPlayer?
|
||||
private weak var additionalPlayer: AVPlayer?
|
||||
private weak var playerItem: AVPlayerItem?
|
||||
private weak var additionalPlayerItem: AVPlayerItem?
|
||||
|
||||
private let mirror: Bool
|
||||
|
||||
private var playerItemOutput: AVPlayerItemVideoOutput?
|
||||
private var additionalPlayerItemOutput: AVPlayerItemVideoOutput?
|
||||
|
||||
private var displayLink: CADisplayLink?
|
||||
|
||||
private let device: MTLDevice?
|
||||
private var textureRotation: TextureRotation = .rotate0Degrees
|
||||
private var additionalTextureRotation: TextureRotation = .rotate0Degrees
|
||||
|
||||
private var forceUpdate: Bool = false
|
||||
|
||||
@ -41,8 +48,10 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
var queue: DispatchQueue!
|
||||
var started: Bool = false
|
||||
|
||||
init(player: AVPlayer, renderTarget: RenderTarget) {
|
||||
init(player: AVPlayer, additionalPlayer: AVPlayer?, mirror: Bool, renderTarget: RenderTarget) {
|
||||
self.player = player
|
||||
self.additionalPlayer = additionalPlayer
|
||||
self.mirror = mirror
|
||||
self.device = renderTarget.mtlDevice!
|
||||
|
||||
self.queue = DispatchQueue(
|
||||
@ -54,7 +63,9 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
|
||||
super.init()
|
||||
|
||||
self.updatePlayerItem(player.currentItem)
|
||||
self.playerItem = player.currentItem
|
||||
self.additionalPlayerItem = additionalPlayer?.currentItem
|
||||
self.handleReadyToPlay()
|
||||
}
|
||||
|
||||
func invalidate() {
|
||||
@ -63,21 +74,7 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
self.displayLink?.invalidate()
|
||||
self.displayLink = nil
|
||||
}
|
||||
|
||||
private func updatePlayerItem(_ playerItem: AVPlayerItem?) {
|
||||
self.displayLink?.invalidate()
|
||||
self.displayLink = nil
|
||||
if let output = self.playerItemOutput, let item = self.playerItem {
|
||||
if item.outputs.contains(output) {
|
||||
item.remove(output)
|
||||
}
|
||||
}
|
||||
self.playerItemOutput = nil
|
||||
|
||||
self.playerItem = playerItem
|
||||
self.handleReadyToPlay()
|
||||
}
|
||||
|
||||
private func handleReadyToPlay() {
|
||||
guard let playerItem = self.playerItem else {
|
||||
return
|
||||
@ -94,7 +91,7 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
break
|
||||
}
|
||||
}
|
||||
self.textureRotation = textureRotatonForAVAsset(playerItem.asset)
|
||||
self.textureRotation = textureRotatonForAVAsset(playerItem.asset, mirror: additionalPlayer == nil && mirror)
|
||||
if !hasVideoTrack {
|
||||
return
|
||||
}
|
||||
@ -117,6 +114,16 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
playerItem.add(output)
|
||||
self.playerItemOutput = output
|
||||
|
||||
if let additionalPlayerItem = self.additionalPlayerItem {
|
||||
self.additionalTextureRotation = textureRotatonForAVAsset(additionalPlayerItem.asset, mirror: true)
|
||||
|
||||
let output = AVPlayerItemVideoOutput(outputSettings: outputSettings)
|
||||
output.suppressesPlayerRendering = true
|
||||
output.setDelegate(self, queue: self.queue)
|
||||
additionalPlayerItem.add(output)
|
||||
self.additionalPlayerItemOutput = output
|
||||
}
|
||||
|
||||
self.setupDisplayLink(frameRate: min(60, frameRate))
|
||||
}
|
||||
|
||||
@ -161,7 +168,8 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
return
|
||||
}
|
||||
|
||||
let requestTime = output.itemTime(forHostTime: CACurrentMediaTime())
|
||||
let time = CACurrentMediaTime()
|
||||
let requestTime = output.itemTime(forHostTime: time)
|
||||
if requestTime < .zero {
|
||||
return
|
||||
}
|
||||
@ -173,8 +181,19 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
}
|
||||
|
||||
var presentationTime: CMTime = .zero
|
||||
var mainPixelBuffer: VideoPixelBuffer?
|
||||
if let pixelBuffer = output.copyPixelBuffer(forItemTime: requestTime, itemTimeForDisplay: &presentationTime) {
|
||||
self.output?.consumeVideoPixelBuffer(pixelBuffer, rotation: self.textureRotation, timestamp: presentationTime, render: true)
|
||||
mainPixelBuffer = VideoPixelBuffer(pixelBuffer: pixelBuffer, rotation: self.textureRotation, timestamp: presentationTime)
|
||||
}
|
||||
|
||||
let additionalRequestTime = self.additionalPlayerItemOutput?.itemTime(forHostTime: time)
|
||||
var additionalPixelBuffer: VideoPixelBuffer?
|
||||
if let additionalRequestTime, let pixelBuffer = self.additionalPlayerItemOutput?.copyPixelBuffer(forItemTime: additionalRequestTime, itemTimeForDisplay: &presentationTime) {
|
||||
additionalPixelBuffer = VideoPixelBuffer(pixelBuffer: pixelBuffer, rotation: self.additionalTextureRotation, timestamp: presentationTime)
|
||||
}
|
||||
|
||||
if let mainPixelBuffer {
|
||||
self.output?.consumeVideoPixelBuffer(pixelBuffer: mainPixelBuffer, additionalPixelBuffer: additionalPixelBuffer, render: true)
|
||||
}
|
||||
}
|
||||
|
||||
@ -201,7 +220,6 @@ final class VideoTextureSource: NSObject, TextureSource, AVPlayerItemOutputPullD
|
||||
|
||||
final class VideoInputPass: DefaultRenderPass {
|
||||
private var cachedTexture: MTLTexture?
|
||||
private let scalePass = VideoInputScalePass()
|
||||
|
||||
override var fragmentShaderFunctionName: String {
|
||||
return "bt709ToRGBFragmentShader"
|
||||
@ -209,10 +227,9 @@ final class VideoInputPass: DefaultRenderPass {
|
||||
|
||||
override func setup(device: MTLDevice, library: MTLLibrary) {
|
||||
super.setup(device: device, library: library)
|
||||
self.scalePass.setup(device: device, library: library)
|
||||
}
|
||||
|
||||
func processPixelBuffer(_ pixelBuffer: CVPixelBuffer, rotation: TextureRotation, textureCache: CVMetalTextureCache, device: MTLDevice, commandBuffer: MTLCommandBuffer) -> MTLTexture? {
|
||||
func processPixelBuffer(_ pixelBuffer: VideoPixelBuffer, textureCache: CVMetalTextureCache, device: MTLDevice, commandBuffer: MTLCommandBuffer) -> MTLTexture? {
|
||||
func textureFromPixelBuffer(_ pixelBuffer: CVPixelBuffer, pixelFormat: MTLPixelFormat, width: Int, height: Int, plane: Int) -> MTLTexture? {
|
||||
var textureRef : CVMetalTexture?
|
||||
let status = CVMetalTextureCacheCreateTextureFromImage(nil, textureCache, pixelBuffer, nil, pixelFormat, width, height, plane, &textureRef)
|
||||
@ -222,13 +239,13 @@ final class VideoInputPass: DefaultRenderPass {
|
||||
return nil
|
||||
}
|
||||
|
||||
let width = CVPixelBufferGetWidth(pixelBuffer)
|
||||
let height = CVPixelBufferGetHeight(pixelBuffer)
|
||||
guard let inputYTexture = textureFromPixelBuffer(pixelBuffer, pixelFormat: .r8Unorm, width: width, height: height, plane: 0),
|
||||
let inputCbCrTexture = textureFromPixelBuffer(pixelBuffer, pixelFormat: .rg8Unorm, width: width >> 1, height: height >> 1, plane: 1) else {
|
||||
let width = CVPixelBufferGetWidth(pixelBuffer.pixelBuffer)
|
||||
let height = CVPixelBufferGetHeight(pixelBuffer.pixelBuffer)
|
||||
guard let inputYTexture = textureFromPixelBuffer(pixelBuffer.pixelBuffer, pixelFormat: .r8Unorm, width: width, height: height, plane: 0),
|
||||
let inputCbCrTexture = textureFromPixelBuffer(pixelBuffer.pixelBuffer, pixelFormat: .rg8Unorm, width: width >> 1, height: height >> 1, plane: 1) else {
|
||||
return nil
|
||||
}
|
||||
return self.process(yTexture: inputYTexture, cbcrTexture: inputCbCrTexture, width: width, height: height, rotation: rotation, device: device, commandBuffer: commandBuffer)
|
||||
return self.process(yTexture: inputYTexture, cbcrTexture: inputCbCrTexture, width: width, height: height, rotation: pixelBuffer.rotation, device: device, commandBuffer: commandBuffer)
|
||||
}
|
||||
|
||||
func process(yTexture: MTLTexture, cbcrTexture: MTLTexture, width: Int, height: Int, rotation: TextureRotation, device: MTLDevice, commandBuffer: MTLCommandBuffer) -> MTLTexture? {
|
||||
@ -279,26 +296,364 @@ final class VideoInputPass: DefaultRenderPass {
|
||||
|
||||
renderCommandEncoder.endEncoding()
|
||||
|
||||
var outputTexture = self.cachedTexture
|
||||
if let texture = outputTexture {
|
||||
outputTexture = self.scalePass.process(input: texture, device: device, commandBuffer: commandBuffer)
|
||||
}
|
||||
return outputTexture
|
||||
return self.cachedTexture
|
||||
}
|
||||
}
|
||||
|
||||
final class VideoInputScalePass: DefaultRenderPass {
|
||||
private func verticesData(
|
||||
textureRotation: TextureRotation,
|
||||
containerSize: CGSize,
|
||||
position: CGPoint,
|
||||
size: CGSize,
|
||||
rotation: CGFloat,
|
||||
z: Float = 0.0
|
||||
) -> [VertexData] {
|
||||
let topLeft: simd_float2
|
||||
let topRight: simd_float2
|
||||
let bottomLeft: simd_float2
|
||||
let bottomRight: simd_float2
|
||||
|
||||
switch textureRotation {
|
||||
case .rotate0Degrees:
|
||||
topLeft = simd_float2(0.0, 1.0)
|
||||
topRight = simd_float2(1.0, 1.0)
|
||||
bottomLeft = simd_float2(0.0, 0.0)
|
||||
bottomRight = simd_float2(1.0, 0.0)
|
||||
case .rotate180Degrees:
|
||||
topLeft = simd_float2(1.0, 0.0)
|
||||
topRight = simd_float2(0.0, 0.0)
|
||||
bottomLeft = simd_float2(1.0, 1.0)
|
||||
bottomRight = simd_float2(0.0, 1.0)
|
||||
case .rotate90Degrees:
|
||||
topLeft = simd_float2(1.0, 1.0)
|
||||
topRight = simd_float2(1.0, 0.0)
|
||||
bottomLeft = simd_float2(0.0, 1.0)
|
||||
bottomRight = simd_float2(0.0, 0.0)
|
||||
case .rotate90DegreesMirrored:
|
||||
topLeft = simd_float2(1.0, 0.0)
|
||||
topRight = simd_float2(1.0, 1.0)
|
||||
bottomLeft = simd_float2(0.0, 0.0)
|
||||
bottomRight = simd_float2(0.0, 1.0)
|
||||
case .rotate270Degrees:
|
||||
topLeft = simd_float2(0.0, 0.0)
|
||||
topRight = simd_float2(0.0, 1.0)
|
||||
bottomLeft = simd_float2(1.0, 0.0)
|
||||
bottomRight = simd_float2(1.0, 1.0)
|
||||
}
|
||||
|
||||
let relativeSize = CGSize(
|
||||
width: size.width / containerSize.width,
|
||||
height: size.height / containerSize.height
|
||||
)
|
||||
let relativeOffset = CGPoint(
|
||||
x: position.x / containerSize.width,
|
||||
y: position.y / containerSize.height
|
||||
)
|
||||
|
||||
let rect = CGRect(
|
||||
origin: CGPoint(
|
||||
x: relativeOffset.x - relativeSize.width / 2.0,
|
||||
y: relativeOffset.y - relativeSize.height / 2.0
|
||||
),
|
||||
size: relativeSize
|
||||
)
|
||||
|
||||
return [
|
||||
VertexData(
|
||||
pos: simd_float4(x: Float(rect.minX) * 2.0, y: Float(rect.minY) * 2.0, z: z, w: 1),
|
||||
texCoord: topLeft,
|
||||
localPos: simd_float2(0.0, 0.0)
|
||||
),
|
||||
VertexData(
|
||||
pos: simd_float4(x: Float(rect.maxX) * 2.0, y: Float(rect.minY) * 2.0, z: z, w: 1),
|
||||
texCoord: topRight,
|
||||
localPos: simd_float2(1.0, 0.0)
|
||||
),
|
||||
VertexData(
|
||||
pos: simd_float4(x: Float(rect.minX) * 2.0, y: Float(rect.maxY) * 2.0, z: z, w: 1),
|
||||
texCoord: bottomLeft,
|
||||
localPos: simd_float2(0.0, 1.0)
|
||||
),
|
||||
VertexData(
|
||||
pos: simd_float4(x: Float(rect.maxX) * 2.0, y: Float(rect.maxY) * 2.0, z: z, w: 1),
|
||||
texCoord: bottomRight,
|
||||
localPos: simd_float2(1.0, 1.0)
|
||||
),
|
||||
]
|
||||
}
|
||||
|
||||
private func lookupSpringValue(_ t: CGFloat) -> CGFloat {
|
||||
let table: [(CGFloat, CGFloat)] = [
|
||||
(0.0, 0.0),
|
||||
(0.0625, 0.1123005598783493),
|
||||
(0.125, 0.31598418951034546),
|
||||
(0.1875, 0.5103585720062256),
|
||||
(0.25, 0.6650152802467346),
|
||||
(0.3125, 0.777747631072998),
|
||||
(0.375, 0.8557760119438171),
|
||||
(0.4375, 0.9079672694206238),
|
||||
(0.5, 0.942038357257843),
|
||||
(0.5625, 0.9638798832893372),
|
||||
(0.625, 0.9776856303215027),
|
||||
(0.6875, 0.9863143563270569),
|
||||
(0.75, 0.991658091545105),
|
||||
(0.8125, 0.9949421286582947),
|
||||
(0.875, 0.9969474077224731),
|
||||
(0.9375, 0.9981651306152344),
|
||||
(1.0, 1.0)
|
||||
]
|
||||
|
||||
for i in 0 ..< table.count - 2 {
|
||||
let lhs = table[i]
|
||||
let rhs = table[i + 1]
|
||||
|
||||
if t >= lhs.0 && t <= rhs.0 {
|
||||
let fraction = (t - lhs.0) / (rhs.0 - lhs.0)
|
||||
let value = lhs.1 + fraction * (rhs.1 - lhs.1)
|
||||
return value
|
||||
}
|
||||
}
|
||||
return 1.0
|
||||
}
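Note on the hunk above: lookupSpringValue approximates a spring ease-out with 17 pre-sampled points at 1/16 steps and linear interpolation between neighbours; it drives the PIP/full-screen swap animation in the render pass below. A worked example of the interpolation:

    // t = 0.1 falls between (0.0625, 0.1123) and (0.125, 0.3160)
    // fraction = (0.1 - 0.0625) / (0.125 - 0.0625) = 0.6
    // value ≈ 0.1123 + 0.6 * (0.3160 - 0.1123) ≈ 0.235

Because the loop runs to table.count - 2, inputs above 0.9375 skip the last segment and hit the final return 1.0; the sampled curve is already within 0.002 of 1.0 there, so the effect is negligible.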
|
||||
|
||||
final class VideoInputScalePass: RenderPass {
|
||||
private var cachedTexture: MTLTexture?
|
||||
|
||||
override func process(input: MTLTexture, device: MTLDevice, commandBuffer: MTLCommandBuffer) -> MTLTexture? {
|
||||
guard max(input.width, input.height) > 1920 else {
|
||||
var mainPipelineState: MTLRenderPipelineState?
|
||||
var mainVerticesBuffer: MTLBuffer?
|
||||
var mainTextureRotation: TextureRotation = .rotate0Degrees
|
||||
|
||||
var additionalVerticesBuffer: MTLBuffer?
|
||||
var additionalTextureRotation: TextureRotation = .rotate0Degrees
|
||||
|
||||
var pixelFormat: MTLPixelFormat {
|
||||
return .bgra8Unorm
|
||||
}
|
||||
|
||||
func setup(device: MTLDevice, library: MTLLibrary) {
|
||||
let descriptor = MTLRenderPipelineDescriptor()
|
||||
descriptor.vertexFunction = library.makeFunction(name: "defaultVertexShader")
|
||||
descriptor.fragmentFunction = library.makeFunction(name: "dualFragmentShader")
|
||||
descriptor.colorAttachments[0].pixelFormat = self.pixelFormat
|
||||
descriptor.colorAttachments[0].isBlendingEnabled = true
|
||||
descriptor.colorAttachments[0].rgbBlendOperation = .add
|
||||
descriptor.colorAttachments[0].alphaBlendOperation = .add
|
||||
descriptor.colorAttachments[0].sourceRGBBlendFactor = .sourceAlpha
|
||||
descriptor.colorAttachments[0].sourceAlphaBlendFactor = .sourceAlpha
|
||||
descriptor.colorAttachments[0].destinationRGBBlendFactor = .oneMinusSourceAlpha
|
||||
descriptor.colorAttachments[0].destinationAlphaBlendFactor = .oneMinusSourceAlpha
|
||||
|
||||
do {
|
||||
self.mainPipelineState = try device.makeRenderPipelineState(descriptor: descriptor)
|
||||
} catch {
|
||||
print(error.localizedDescription)
|
||||
}
|
||||
}
|
||||
|
||||
func setupMainVerticesBuffer(device: MTLDevice, rotation: TextureRotation = .rotate0Degrees) {
|
||||
if self.mainVerticesBuffer == nil || rotation != self.mainTextureRotation {
|
||||
self.mainTextureRotation = rotation
|
||||
let vertices = verticesDataForRotation(rotation)
|
||||
self.mainVerticesBuffer = device.makeBuffer(
|
||||
bytes: vertices,
|
||||
length: MemoryLayout<VertexData>.stride * vertices.count,
|
||||
options: [])
|
||||
}
|
||||
}
|
||||
|
||||
func encodeVideo(
|
||||
using encoder: MTLRenderCommandEncoder,
|
||||
containerSize: CGSize,
|
||||
texture: MTLTexture,
|
||||
textureRotation: TextureRotation,
|
||||
position: VideoPosition,
|
||||
roundness: Float,
|
||||
alpha: Float,
|
||||
zPosition: Float,
|
||||
device: MTLDevice
|
||||
) {
|
||||
encoder.setFragmentTexture(texture, index: 0)
|
||||
|
||||
let center = CGPoint(
|
||||
x: position.position.x - containerSize.width / 2.0,
|
||||
y: containerSize.height - position.position.y - containerSize.height / 2.0
|
||||
)
|
||||
|
||||
let size = CGSize(
|
||||
width: position.size.width * position.scale,
|
||||
height: position.size.height * position.scale
|
||||
)
|
||||
|
||||
let vertices = verticesData(textureRotation: textureRotation, containerSize: containerSize, position: center, size: size, rotation: position.rotation, z: zPosition)
|
||||
let buffer = device.makeBuffer(
|
||||
bytes: vertices,
|
||||
length: MemoryLayout<VertexData>.stride * vertices.count,
|
||||
options: [])
|
||||
encoder.setVertexBuffer(buffer, offset: 0, index: 0)
|
||||
|
||||
var resolution = simd_uint2(UInt32(size.width), UInt32(size.height))
|
||||
encoder.setFragmentBytes(&resolution, length: MemoryLayout<simd_uint2>.size * 2, index: 0)
|
||||
|
||||
var roundness = roundness
|
||||
encoder.setFragmentBytes(&roundness, length: MemoryLayout<simd_float1>.size, index: 1)
|
||||
|
||||
var alpha = alpha
|
||||
encoder.setFragmentBytes(&alpha, length: MemoryLayout<simd_float1>.size, index: 2)
|
||||
|
||||
encoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4)
|
||||
}
|
||||
|
||||
func setupAdditionalVerticesBuffer(device: MTLDevice, rotation: TextureRotation = .rotate0Degrees) {
|
||||
self.additionalTextureRotation = rotation
|
||||
let vertices = verticesDataForRotation(rotation, rect: CGRect(x: -0.5, y: -0.5, width: 0.5, height: 0.5), z: 0.5)
|
||||
self.additionalVerticesBuffer = device.makeBuffer(
|
||||
bytes: vertices,
|
||||
length: MemoryLayout<VertexData>.stride * vertices.count,
|
||||
options: [])
|
||||
}
|
||||
|
||||
func update(values: MediaEditorValues) {
|
||||
if let position = values.additionalVideoPosition, let scale = values.additionalVideoScale, let rotation = values.additionalVideoRotation {
|
||||
self.additionalPosition = VideoInputScalePass.VideoPosition(position: position, size: CGSize(width: 1080.0 / 4.0, height: 1920.0 / 4.0), scale: scale, rotation: rotation)
|
||||
}
|
||||
if !values.additionalVideoPositionChanges.isEmpty {
|
||||
self.videoPositionChanges = values.additionalVideoPositionChanges
|
||||
}
|
||||
}
|
||||
|
||||
private var mainPosition = VideoPosition(
|
||||
position: CGPoint(x: 1080 / 2.0, y: 1920.0 / 2.0),
|
||||
size: CGSize(width: 1080.0, height: 1920.0),
|
||||
scale: 1.0,
|
||||
rotation: 0.0
|
||||
)
|
||||
|
||||
private var additionalPosition = VideoPosition(
|
||||
position: CGPoint(x: 1080 / 2.0, y: 1920.0 / 2.0),
|
||||
size: CGSize(width: 1080.0, height: 1920.0),
|
||||
scale: 0.5,
|
||||
rotation: 0.0
|
||||
)
|
||||
|
||||
private var transitionDuration = 0.5
|
||||
private var videoPositionChanges: [VideoPositionChange] = []
|
||||
|
||||
enum VideoType {
|
||||
case main
|
||||
case additional
|
||||
case transition
|
||||
}
|
||||
|
||||
struct VideoPosition {
|
||||
let position: CGPoint
|
||||
let size: CGSize
|
||||
let scale: CGFloat
|
||||
let rotation: CGFloat
|
||||
|
||||
|
||||
func mixed(with other: VideoPosition, fraction: CGFloat) -> VideoPosition {
|
||||
let position = CGPoint(
|
||||
x: self.position.x + (other.position.x - self.position.x) * fraction,
|
||||
y: self.position.y + (other.position.y - self.position.y) * fraction
|
||||
)
|
||||
let size = CGSize(
|
||||
width: self.size.width + (other.size.width - self.size.width) * fraction,
|
||||
height: self.size.height + (other.size.height - self.size.height) * fraction
|
||||
)
|
||||
let scale = self.scale + (other.scale - self.scale) * fraction
|
||||
let rotation = self.rotation + (other.rotation - self.rotation) * fraction
|
||||
|
||||
return VideoPosition(
|
||||
position: position,
|
||||
size: size,
|
||||
scale: scale,
|
||||
rotation: rotation
|
||||
)
|
||||
}
|
||||
}
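Note on the hunk above: VideoPosition.mixed(with:fraction:) is a plain component-wise linear interpolation over position, size, scale and rotation; the transition code below uses it to morph one placement into another. For example, blending the PIP placement (scale 0.5) toward the full-screen placement (scale 1.0) at fraction 0.5 yields an intermediate scale of 0.75:

    let halfway = additionalPosition.mixed(with: mainPosition, fraction: 0.5)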
|
||||
|
||||
struct VideoState {
|
||||
let texture: MTLTexture
|
||||
let textureRotation: TextureRotation
|
||||
let position: VideoPosition
|
||||
let roundness: Float
|
||||
let alpha: Float
|
||||
}
|
||||
|
||||
func transitionState(for time: CMTime, mainInput: MTLTexture, additionalInput: MTLTexture?) -> (VideoState, VideoState?, VideoState?) {
|
||||
let timestamp = time.seconds
|
||||
|
||||
var backgroundTexture = mainInput
|
||||
var backgroundTextureRotation = self.mainTextureRotation
|
||||
|
||||
var foregroundTexture = additionalInput
|
||||
var foregroundTextureRotation = self.additionalTextureRotation
|
||||
|
||||
var transitionFraction = 1.0
|
||||
if let additionalInput {
|
||||
var previousChange: VideoPositionChange?
|
||||
for change in self.videoPositionChanges {
|
||||
if timestamp >= change.timestamp {
|
||||
previousChange = change
|
||||
}
|
||||
if timestamp < change.timestamp {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if let previousChange {
|
||||
if previousChange.additional {
|
||||
backgroundTexture = additionalInput
|
||||
backgroundTextureRotation = self.additionalTextureRotation
|
||||
|
||||
foregroundTexture = mainInput
|
||||
foregroundTextureRotation = self.mainTextureRotation
|
||||
}
|
||||
if previousChange.timestamp > 0.0 && timestamp < previousChange.timestamp + transitionDuration {
|
||||
transitionFraction = (timestamp - previousChange.timestamp) / transitionDuration
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let backgroundVideoState = VideoState(texture: backgroundTexture, textureRotation: backgroundTextureRotation, position: self.mainPosition, roundness: 0.0, alpha: 1.0)
|
||||
var foregroundVideoState: VideoState?
|
||||
var disappearingVideoState: VideoState?
|
||||
|
||||
if let foregroundTexture {
|
||||
var foregroundPosition = self.additionalPosition
|
||||
var roundness: Float = 1.0
|
||||
if transitionFraction < 1.0 {
|
||||
let springFraction = lookupSpringValue(transitionFraction)
|
||||
foregroundPosition = foregroundPosition.mixed(with: self.mainPosition, fraction: 1.0 - springFraction)
|
||||
roundness = Float(springFraction)
|
||||
|
||||
let disappearedPosition = VideoPosition(position: self.additionalPosition.position, size: self.additionalPosition.size, scale: 0.01, rotation: self.additionalPosition.scale)
|
||||
disappearingVideoState = VideoState(texture: backgroundTexture, textureRotation: backgroundTextureRotation, position: self.additionalPosition.mixed(with: disappearedPosition, fraction: min(1.0, transitionFraction * 1.428)), roundness: 1.0, alpha: max(0.0, 1.0 - Float(transitionFraction) * 3.33))
|
||||
}
|
||||
foregroundVideoState = VideoState(texture: foregroundTexture, textureRotation: foregroundTextureRotation, position: foregroundPosition, roundness: roundness, alpha: 1.0)
|
||||
}
|
||||
|
||||
return (backgroundVideoState, foregroundVideoState, disappearingVideoState)
|
||||
}
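Note on the hunk above: transitionState(for:mainInput:additionalInput:) decides, per frame, which stream fills the canvas. The most recent VideoPositionChange at or before the current timestamp selects the background, and for transitionDuration (0.5 s) after a swap the fraction (timestamp - change.timestamp) / transitionDuration is pushed through lookupSpringValue, so the incoming foreground springs from full-screen back into the PIP circle (roundness animating 0 → 1) while a shrinking, fading copy of the newly backgrounded stream leaves the PIP spot. The resulting VideoStates are drawn back-to-front by process() below with zPosition 0.0, 0.5 and 0.75.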
|
||||
|
||||
func process(input: MTLTexture, secondInput: MTLTexture?, timestamp: CMTime, device: MTLDevice, commandBuffer: MTLCommandBuffer) -> MTLTexture? {
|
||||
guard max(input.width, input.height) > 1920 || secondInput != nil else {
|
||||
return input
|
||||
}
|
||||
self.setupVerticesBuffer(device: device)
|
||||
|
||||
|
||||
let scaledSize = CGSize(width: input.width, height: input.height).fitted(CGSize(width: 1920.0, height: 1920.0))
|
||||
let width = Int(scaledSize.width)
|
||||
let height = Int(scaledSize.height)
|
||||
let width: Int
|
||||
let height: Int
|
||||
|
||||
if secondInput != nil {
|
||||
width = 1080
|
||||
height = 1920
|
||||
} else {
|
||||
width = Int(scaledSize.width)
|
||||
height = Int(scaledSize.height)
|
||||
}
|
||||
|
||||
let containerSize = CGSize(width: width, height: height)
|
||||
|
||||
if self.cachedTexture == nil || self.cachedTexture?.width != width || self.cachedTexture?.height != height {
|
||||
let textureDescriptor = MTLTextureDescriptor()
|
||||
@ -330,12 +685,56 @@ final class VideoInputScalePass: DefaultRenderPass {
|
||||
znear: -1.0, zfar: 1.0)
|
||||
)
|
||||
|
||||
renderCommandEncoder.setFragmentTexture(input, index: 0)
|
||||
renderCommandEncoder.setRenderPipelineState(self.mainPipelineState!)
|
||||
|
||||
let (mainVideoState, additionalVideoState, transitionVideoState) = self.transitionState(for: timestamp, mainInput: input, additionalInput: secondInput)
|
||||
|
||||
self.encodeDefaultCommands(using: renderCommandEncoder)
|
||||
self.encodeVideo(
|
||||
using: renderCommandEncoder,
|
||||
containerSize: containerSize,
|
||||
texture: mainVideoState.texture,
|
||||
textureRotation: mainVideoState.textureRotation,
|
||||
position: mainVideoState.position,
|
||||
roundness: mainVideoState.roundness,
|
||||
alpha: mainVideoState.alpha,
|
||||
zPosition: 0.0,
|
||||
device: device
|
||||
)
|
||||
|
||||
if let additionalVideoState {
|
||||
self.encodeVideo(
|
||||
using: renderCommandEncoder,
|
||||
containerSize: containerSize,
|
||||
texture: additionalVideoState.texture,
|
||||
textureRotation: additionalVideoState.textureRotation,
|
||||
position: additionalVideoState.position,
|
||||
roundness: additionalVideoState.roundness,
|
||||
alpha: additionalVideoState.alpha,
|
||||
zPosition: 0.5,
|
||||
device: device
|
||||
)
|
||||
}
|
||||
|
||||
if let transitionVideoState {
|
||||
self.encodeVideo(
|
||||
using: renderCommandEncoder,
|
||||
containerSize: containerSize,
|
||||
texture: transitionVideoState.texture,
|
||||
textureRotation: transitionVideoState.textureRotation,
|
||||
position: transitionVideoState.position,
|
||||
roundness: transitionVideoState.roundness,
|
||||
alpha: transitionVideoState.alpha,
|
||||
zPosition: 0.75,
|
||||
device: device
|
||||
)
|
||||
}
|
||||
|
||||
renderCommandEncoder.endEncoding()
|
||||
|
||||
return self.cachedTexture!
|
||||
}
|
||||
|
||||
func process(input: MTLTexture, device: MTLDevice, commandBuffer: MTLCommandBuffer) -> MTLTexture? {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
@ -53,7 +53,8 @@ private class HistogramView: UIView {
|
||||
},
|
||||
size: size,
|
||||
type: .filled,
|
||||
granularity: 200
|
||||
granularity: 200,
|
||||
floor: true
|
||||
)
|
||||
|
||||
transition.setShapeLayerPath(layer: self.shapeLayer, path: path.cgPath)
|
||||
@ -709,7 +710,8 @@ final class CurvesScreenComponent: Component {
|
||||
},
|
||||
size: availableSize,
|
||||
type: .line,
|
||||
granularity: 100
|
||||
granularity: 100,
|
||||
floor: true
|
||||
)
|
||||
self.curveLayer.path = curvePath.cgPath
|
||||
|
||||
|
@ -256,7 +256,7 @@ final class MediaEditorScreenComponent: Component {
|
||||
private var inputMediaNodeStateContext = ChatEntityKeyboardInputNode.StateContext()
|
||||
private var inputMediaInteraction: ChatEntityKeyboardInputNode.Interaction?
|
||||
private var inputMediaNode: ChatEntityKeyboardInputNode?
|
||||
|
||||
|
||||
private var component: MediaEditorScreenComponent?
|
||||
private weak var state: State?
|
||||
private var environment: ViewControllerComponentContainer.Environment?
|
||||
@ -1605,6 +1605,8 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
|
||||
fileprivate var hasAnyChanges = false
|
||||
|
||||
private var playbackPositionDisposable: Disposable?
|
||||
|
||||
private var presentationData: PresentationData
|
||||
private var validLayout: ContainerViewLayout?
|
||||
|
||||
@ -1743,6 +1745,7 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
self.subjectDisposable?.dispose()
|
||||
self.gradientColorsDisposable?.dispose()
|
||||
self.appInForegroundDisposable?.dispose()
|
||||
self.playbackPositionDisposable?.dispose()
|
||||
}
|
||||
|
||||
private func setup(with subject: MediaEditorScreen.Subject) {
|
||||
@ -1776,32 +1779,7 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
mediaEntity.scale = storyDimensions.width / fittedSize.width
|
||||
}
|
||||
self.entitiesView.add(mediaEntity, announce: false)
|
||||
|
||||
if case let .image(_, _, additionalImage, position) = subject, let additionalImage {
|
||||
let image = generateImage(CGSize(width: additionalImage.size.width, height: additionalImage.size.width), contextGenerator: { size, context in
|
||||
let bounds = CGRect(origin: .zero, size: size)
|
||||
context.clear(bounds)
|
||||
context.addEllipse(in: bounds)
|
||||
context.clip()
|
||||
|
||||
if let cgImage = additionalImage.cgImage {
|
||||
context.draw(cgImage, in: CGRect(origin: CGPoint(x: (size.width - additionalImage.size.width) / 2.0, y: (size.height - additionalImage.size.height) / 2.0), size: additionalImage.size))
|
||||
}
|
||||
})
|
||||
let imageEntity = DrawingStickerEntity(content: .image(image ?? additionalImage))
|
||||
imageEntity.referenceDrawingSize = storyDimensions
|
||||
imageEntity.scale = 1.49
|
||||
imageEntity.position = position.getPosition(storyDimensions)
|
||||
self.entitiesView.add(imageEntity, announce: false)
|
||||
} else if case let .video(_, _, additionalVideoPath, additionalVideoImage, _, _, _, position) = subject, let additionalVideoPath {
|
||||
let videoEntity = DrawingStickerEntity(content: .video(additionalVideoPath, additionalVideoImage))
|
||||
videoEntity.referenceDrawingSize = storyDimensions
|
||||
videoEntity.scale = 1.49
|
||||
videoEntity.mirrored = true
|
||||
videoEntity.position = position.getPosition(storyDimensions)
|
||||
self.entitiesView.add(videoEntity, announce: false)
|
||||
}
|
||||
|
||||
|
||||
let initialPosition = mediaEntity.position
|
||||
let initialScale = mediaEntity.scale
|
||||
let initialRotation = mediaEntity.rotation
|
||||
@ -1847,6 +1825,58 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
}
|
||||
}
|
||||
|
||||
if case let .image(_, _, additionalImage, position) = subject, let additionalImage {
|
||||
let image = generateImage(CGSize(width: additionalImage.size.width, height: additionalImage.size.width), contextGenerator: { size, context in
|
||||
let bounds = CGRect(origin: .zero, size: size)
|
||||
context.clear(bounds)
|
||||
context.addEllipse(in: bounds)
|
||||
context.clip()
|
||||
|
||||
if let cgImage = additionalImage.cgImage {
|
||||
context.draw(cgImage, in: CGRect(origin: CGPoint(x: (size.width - additionalImage.size.width) / 2.0, y: (size.height - additionalImage.size.height) / 2.0), size: additionalImage.size))
|
||||
}
|
||||
})
|
||||
let imageEntity = DrawingStickerEntity(content: .image(image ?? additionalImage))
|
||||
imageEntity.referenceDrawingSize = storyDimensions
|
||||
imageEntity.scale = 1.49
|
||||
imageEntity.position = position.getPosition(storyDimensions)
|
||||
self.entitiesView.add(imageEntity, announce: false)
|
||||
} else if case let .video(_, _, _, additionalVideoPath, _, _, _, changes, position) = subject, let additionalVideoPath {
|
||||
let videoEntity = DrawingStickerEntity(content: .dualVideoReference)
|
||||
videoEntity.referenceDrawingSize = storyDimensions
|
||||
videoEntity.scale = 1.49
|
||||
videoEntity.position = position.getPosition(storyDimensions)
|
||||
self.entitiesView.add(videoEntity, announce: false)
|
||||
|
||||
mediaEditor.setAdditionalVideo(additionalVideoPath, positionChanges: changes.map { VideoPositionChange(additional: $0.0, timestamp: $0.1) })
|
||||
mediaEditor.setAdditionalVideoPosition(videoEntity.position, scale: videoEntity.scale, rotation: videoEntity.rotation)
|
||||
if let entityView = self.entitiesView.getView(for: videoEntity.uuid) as? DrawingStickerEntityView {
|
||||
entityView.updated = { [weak videoEntity, weak self] in
|
||||
if let self, let videoEntity {
|
||||
self.mediaEditor?.setAdditionalVideoPosition(videoEntity.position, scale: videoEntity.scale, rotation: videoEntity.rotation)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else if case let .asset(asset) = subject, asset.mediaType == .video {
|
||||
//#if DEBUG
|
||||
// let videoEntity = DrawingStickerEntity(content: .dualVideoReference)
|
||||
// videoEntity.referenceDrawingSize = storyDimensions
|
||||
// videoEntity.scale = 1.49
|
||||
// videoEntity.position = PIPPosition.bottomRight.getPosition(storyDimensions)
|
||||
// self.entitiesView.add(videoEntity, announce: false)
|
||||
//
|
||||
// mediaEditor.setAdditionalVideo("", positionChanges: [VideoPositionChange(additional: false, timestamp: 0.0), VideoPositionChange(additional: true, timestamp: 3.0)])
|
||||
// mediaEditor.setAdditionalVideoPosition(videoEntity.position, scale: videoEntity.scale, rotation: videoEntity.rotation)
|
||||
// if let entityView = self.entitiesView.getView(for: videoEntity.uuid) as? DrawingStickerEntityView {
|
||||
// entityView.updated = { [weak videoEntity, weak self] in
|
||||
// if let self, let videoEntity {
|
||||
// self.mediaEditor?.setAdditionalVideoPosition(videoEntity.position, scale: videoEntity.scale, rotation: videoEntity.rotation)
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
//#endif
|
||||
}
|
||||
|
||||
self.gradientColorsDisposable = mediaEditor.gradientColors.start(next: { [weak self] colors in
|
||||
if let self, let colors {
|
||||
let (topColor, bottomColor) = colors
|
||||
@ -1907,6 +1937,61 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if case .video = subject {
|
||||
self.playbackPositionDisposable = (mediaEditor.position
|
||||
|> deliverOnMainQueue).start(next: { [weak self] position in
|
||||
if let self {
|
||||
self.updateVideoPlaybackPosition(position: position)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
private var additionalIsMainstage = false
|
||||
private func updateVideoPlaybackPosition(position: CGFloat) {
|
||||
guard let subject = self.subject, case let .video(_, _, _, _, _, _, _, timestamps, _) = subject, !timestamps.isEmpty else {
|
||||
return
|
||||
}
|
||||
var currentIsFront = false
|
||||
for (isFront, timestamp) in timestamps {
|
||||
if position < timestamp {
|
||||
break
|
||||
}
|
||||
currentIsFront = isFront
|
||||
}
|
||||
|
||||
self.additionalIsMainstage = currentIsFront
|
||||
self.updateMainStageVideo()
|
||||
}
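Note on the hunk above: updateVideoPlaybackPosition walks the recorded camera-swap timestamps and flips additionalIsMainstage whenever playback crosses one, then calls updateMainStageVideo() so the editor preview re-parents the front/back video views and keeps the same stream full-screen that the exported video will show at that point.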
|
||||
|
||||
private func updateMainStageVideo() {
|
||||
guard let mainEntityView = self.entitiesView.getView(where: { $0 is DrawingMediaEntityView }) as? DrawingMediaEntityView, let mainEntity = mainEntityView.entity as? DrawingMediaEntity else {
|
||||
return
|
||||
}
|
||||
|
||||
let additionalEntityView = self.entitiesView.getView(where: { view in
|
||||
if let stickerEntity = view.entity as? DrawingStickerEntity, case .video = stickerEntity.content {
|
||||
return true
|
||||
} else {
|
||||
return false
|
||||
}
|
||||
}) as? DrawingStickerEntityView
|
||||
|
||||
var animated = true
|
||||
if mainEntity.scale != 1.0 || mainEntity.rotation != 0.0 || mainEntity.position != CGPoint(x: storyDimensions.width / 2.0, y: storyDimensions.height / 2.0) {
|
||||
animated = false
|
||||
}
|
||||
|
||||
let _ = animated
|
||||
|
||||
if self.additionalIsMainstage {
|
||||
mainEntityView.additionalView = additionalEntityView?.videoView
|
||||
additionalEntityView?.mainView = mainEntityView.previewView
|
||||
} else {
|
||||
mainEntityView.additionalView = nil
|
||||
additionalEntityView?.mainView = nil
|
||||
}
|
||||
}
|
||||
|
||||
override func didLoad() {
|
||||
@ -1967,6 +2052,13 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
},
|
||||
onInteractionUpdated: { [weak self] isInteracting in
|
||||
if let self {
|
||||
if let selectedEntityView = self.entitiesView.selectedEntityView as? DrawingStickerEntityView, let entity = selectedEntityView.entity as? DrawingStickerEntity, case .dualVideoReference = entity.content {
|
||||
if isInteracting {
|
||||
self.mediaEditor?.stop()
|
||||
} else {
|
||||
self.mediaEditor?.play()
|
||||
}
|
||||
}
|
||||
self.isInteractingWithEntities = isInteracting
|
||||
self.requestUpdate(transition: .easeInOut(duration: 0.2))
|
||||
}
|
||||
@ -2154,7 +2246,34 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
if let view = self.componentHost.view as? MediaEditorScreenComponent.View {
|
||||
view.animateIn(from: .camera, completion: completion)
|
||||
}
|
||||
if let subject = self.subject, case let .video(_, transitionImage, _, _, _, _, _, _) = subject, let transitionImage {
|
||||
if let subject = self.subject, case let .video(_, mainTransitionImage, _, _, additionalTransitionImage, _, _, positionChangeTimestamps, pipPosition) = subject, let mainTransitionImage {
|
||||
var transitionImage = mainTransitionImage
|
||||
if let additionalTransitionImage {
|
||||
var backgroundImage = mainTransitionImage
|
||||
var foregroundImage = additionalTransitionImage
|
||||
if let change = positionChangeTimestamps.first, change.0 {
|
||||
backgroundImage = additionalTransitionImage
|
||||
foregroundImage = mainTransitionImage
|
||||
}
|
||||
if let combinedTransitionImage = generateImage(backgroundImage.size, scale: 1.0, rotatedContext: { size, context in
|
||||
UIGraphicsPushContext(context)
|
||||
backgroundImage.draw(in: CGRect(origin: .zero, size: size))
|
||||
|
||||
let ellipsePosition = pipPosition.getPosition(storyDimensions)
|
||||
let ellipseSize = CGSize(width: 401.0, height: 401.0)
|
||||
let ellipseRect = CGRect(origin: CGPoint(x: ellipsePosition.x - ellipseSize.width / 2.0, y: ellipsePosition.y - ellipseSize.height / 2.0), size: ellipseSize)
|
||||
let foregroundSize = foregroundImage.size.aspectFilled(ellipseSize)
|
||||
let foregroundRect = CGRect(origin: CGPoint(x: ellipseRect.center.x - foregroundSize.width / 2.0, y: ellipseRect.center.y - foregroundSize.height / 2.0), size: foregroundSize)
|
||||
context.addEllipse(in: ellipseRect)
|
||||
context.clip()
|
||||
|
||||
foregroundImage.draw(in: foregroundRect)
|
||||
|
||||
UIGraphicsPopContext()
|
||||
}) {
|
||||
transitionImage = combinedTransitionImage
|
||||
}
|
||||
}
|
||||
self.setupTransitionImage(transitionImage)
|
||||
}
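Note on the hunk above: when both cameras produced a cover frame, the camera-to-editor transition image is composited on the CPU. The background cover (swapped if the very first position change already put the front camera on the main stage) is drawn full-size, and the other cover is clipped into a circle at the PIP position, so the still shown during the transition already matches the dual-camera layout the editor is about to render.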
|
||||
case let .gallery(transitionIn):
|
||||
@ -2861,13 +2980,13 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
|
||||
public enum Subject {
|
||||
case image(UIImage, PixelDimensions, UIImage?, PIPPosition)
|
||||
case video(String, UIImage?, String?, UIImage?, PixelDimensions, Double, [(Bool, Double)], PIPPosition)
|
||||
case video(String, UIImage?, Bool, String?, UIImage?, PixelDimensions, Double, [(Bool, Double)], PIPPosition)
|
||||
case asset(PHAsset)
|
||||
case draft(MediaEditorDraft, Int64?)
|
||||
|
||||
var dimensions: PixelDimensions {
|
||||
switch self {
|
||||
case let .image(_, dimensions, _, _), let .video(_, _, _, _, dimensions, _, _, _):
|
||||
case let .image(_, dimensions, _, _), let .video(_, _, _, _, _, dimensions, _, _, _):
|
||||
return dimensions
|
||||
case let .asset(asset):
|
||||
return PixelDimensions(width: Int32(asset.pixelWidth), height: Int32(asset.pixelHeight))
|
||||
@ -2880,8 +2999,8 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
switch self {
|
||||
case let .image(image, dimensions, _, _):
|
||||
return .image(image, dimensions)
|
||||
case let .video(videoPath, transitionImage, _, _, dimensions, duration, _, _):
|
||||
return .video(videoPath, transitionImage, dimensions, duration)
|
||||
case let .video(videoPath, transitionImage, mirror, additionalVideoPath, _, dimensions, duration, _, _):
|
||||
return .video(videoPath, transitionImage, mirror, additionalVideoPath, dimensions, duration)
|
||||
case let .asset(asset):
|
||||
return .asset(asset)
|
||||
case let .draft(draft, _):
|
||||
@ -2893,7 +3012,7 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
switch self {
|
||||
case let .image(image, dimensions, _, _):
|
||||
return .image(image, dimensions)
|
||||
case let .video(videoPath, _, _, _, dimensions, _, _, _):
|
||||
case let .video(videoPath, _, _, _, _, dimensions, _, _, _):
|
||||
return .video(videoPath, dimensions)
|
||||
case let .asset(asset):
|
||||
return .asset(asset)
|
||||
@ -3156,18 +3275,6 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
self?.presentTimeoutPremiumSuggestion(86400 * 2)
|
||||
}
|
||||
})))
|
||||
items.append(.action(ContextMenuActionItem(text: "Keep Always", icon: { theme in
|
||||
return currentArchived ? generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/Check"), color: theme.contextMenu.primaryColor) : nil
|
||||
}, action: { _, a in
|
||||
a(.default)
|
||||
|
||||
updateTimeout(86400, true)
|
||||
})))
|
||||
items.append(.separator)
|
||||
items.append(.action(ContextMenuActionItem(text: "Select 'Keep Always' to show the story on your page.", textLayout: .multiline, textFont: .small, icon: { theme in
|
||||
return nil
|
||||
}, action: { _, _ in
|
||||
})))
|
||||
|
||||
let presentationData = self.context.sharedContext.currentPresentationData.with({ $0 }).withUpdated(theme: defaultDarkPresentationTheme)
|
||||
let contextController = ContextController(account: self.context.account, presentationData: presentationData, source: .reference(HeaderContextReferenceContentSource(controller: self, sourceView: sourceView)), items: .single(ContextController.Items(content: .list(items))), gesture: nil)
|
||||
@ -3332,7 +3439,7 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
switch subject {
|
||||
case let .image(image, dimensions, _, _):
|
||||
saveImageDraft(image, dimensions)
|
||||
case let .video(path, _, _, _, dimensions, _, _, _):
|
||||
case let .video(path, _, _, _, _, dimensions, _, _, _):
|
||||
saveVideoDraft(path, dimensions, duration)
|
||||
case let .asset(asset):
|
||||
if asset.mediaType == .video {
|
||||
@ -3425,7 +3532,7 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
duration = 5.0
|
||||
|
||||
firstFrame = .single(image)
|
||||
case let .video(path, _, _, _, _, _, _, _):
|
||||
case let .video(path, _, _, _, _, _, _, _, _):
|
||||
videoResult = .videoFile(path: path)
|
||||
if let videoTrimRange = mediaEditor.values.videoTrimRange {
|
||||
duration = videoTrimRange.upperBound - videoTrimRange.lowerBound
|
||||
@ -3613,7 +3720,7 @@ public final class MediaEditorScreen: ViewController, UIDropInteractionDelegate
|
||||
|
||||
let exportSubject: Signal<MediaEditorVideoExport.Subject, NoError>
|
||||
switch subject {
|
||||
case let .video(path, _, _, _, _, _, _, _):
|
||||
case let .video(path, _, _, _, _, _, _, _, _):
|
||||
let asset = AVURLAsset(url: NSURL(fileURLWithPath: path) as URL)
|
||||
exportSubject = .single(.video(asset))
|
||||
case let .image(image, _, _, _):
|
||||
|
@ -554,7 +554,7 @@ private final class MediaToolsScreenComponent: Component {
|
||||
switch component.section {
|
||||
case .adjustments:
|
||||
self.curvesState = nil
|
||||
let tools: [AdjustmentTool] = [
|
||||
var tools: [AdjustmentTool] = [
|
||||
AdjustmentTool(
|
||||
key: .enhance,
|
||||
title: "Enhance",
|
||||
@ -627,14 +627,6 @@ private final class MediaToolsScreenComponent: Component {
|
||||
maxValue: 1.0,
|
||||
startValue: 0.0
|
||||
),
|
||||
AdjustmentTool(
|
||||
key: .grain,
|
||||
title: "Grain",
|
||||
value: mediaEditor?.getToolValue(.grain) as? Float ?? 0.0,
|
||||
minValue: 0.0,
|
||||
maxValue: 1.0,
|
||||
startValue: 0.0
|
||||
),
|
||||
AdjustmentTool(
|
||||
key: .sharpen,
|
||||
title: "Sharpen",
|
||||
@ -644,6 +636,18 @@ private final class MediaToolsScreenComponent: Component {
|
||||
startValue: 0.0
|
||||
)
|
||||
]
|
||||
|
||||
if !component.mediaEditor.sourceIsVideo {
|
||||
tools.insert(AdjustmentTool(
|
||||
key: .grain,
|
||||
title: "Grain",
|
||||
value: mediaEditor?.getToolValue(.grain) as? Float ?? 0.0,
|
||||
minValue: 0.0,
|
||||
maxValue: 1.0,
|
||||
startValue: 0.0
|
||||
), at: tools.count - 1)
|
||||
}
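Note on the hunk above: the Grain tool is no longer part of the fixed tool list. It is appended only when the source is a photo (component.mediaEditor.sourceIsVideo == false), inserted just before the last entry so Sharpen stays at the end, presumably because film grain is too costly or too noisy to apply per-frame to video.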
|
||||
|
||||
optionsSize = self.toolOptions.update(
|
||||
transition: optionsTransition,
|
||||
component: AnyComponent(AdjustmentsComponent(
|
||||
@ -814,7 +818,7 @@ private final class MediaToolsScreenComponent: Component {
|
||||
)
|
||||
),
|
||||
environment: {},
|
||||
containerSize: CGSize(width: previewContainerFrame.width, height: previewContainerFrame.height - optionsSize.height)
|
||||
containerSize: CGSize(width: previewContainerFrame.width, height: previewContainerFrame.height)
|
||||
)
|
||||
case .curves:
|
||||
needsHistogram = true
|
||||
|
@ -251,7 +251,7 @@ final class StoryPreviewComponent: Component {
style: .story,
placeholder: "Reply Privately...",
alwaysDarkWhenHasText: false,
nextInputMode: { _ in return nil },
nextInputMode: { _ in return .stickers },
areVoiceMessagesAvailable: false,
presentController: { _ in
},
@ -232,6 +232,16 @@ public final class PeerListItemComponent: Component {
|
||||
self.component = component
|
||||
self.state = state
|
||||
|
||||
let labelData: (String, Bool)
|
||||
if let presence = component.presence {
|
||||
let timestamp = CFAbsoluteTimeGetCurrent() + NSTimeIntervalSince1970
|
||||
labelData = stringAndActivityForUserPresence(strings: component.strings, dateTimeFormat: PresentationDateTimeFormat(), presence: presence, relativeTo: Int32(timestamp))
|
||||
} else if let subtitle = component.subtitle {
|
||||
labelData = (subtitle, false)
|
||||
} else {
|
||||
labelData = ("", false)
|
||||
}
|
||||
|
||||
let contextInset: CGFloat = 0.0
|
||||
|
||||
let height: CGFloat
|
||||
@ -241,14 +251,17 @@ public final class PeerListItemComponent: Component {
|
||||
case .generic:
|
||||
titleFont = Font.semibold(17.0)
|
||||
subtitleFont = Font.regular(15.0)
|
||||
height = 60.0
|
||||
if labelData.0.isEmpty {
|
||||
height = 50.0
|
||||
} else {
|
||||
height = 60.0
|
||||
}
|
||||
case .compact:
|
||||
titleFont = Font.semibold(14.0)
|
||||
subtitleFont = Font.regular(14.0)
|
||||
height = 42.0
|
||||
}
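Note on the hunk above: for the .generic style the row height now depends on whether there is a second line, 50 points when labelData (presence or subtitle) is empty and 60 points otherwise; this is why the labelData computation was hoisted above the height switch in this hunk.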
|
||||
|
||||
|
||||
let verticalInset: CGFloat = 1.0
|
||||
var leftInset: CGFloat = 53.0 + component.sideInset
|
||||
if case .generic = component.style {
|
||||
@ -313,16 +326,6 @@ public final class PeerListItemComponent: Component {
|
||||
self.avatarNode.setPeer(context: component.context, theme: component.theme, peer: peer, clipStyle: clipStyle, synchronousLoad: synchronousLoad, displayDimensions: CGSize(width: avatarSize, height: avatarSize))
|
||||
}
|
||||
|
||||
let labelData: (String, Bool)
|
||||
if let presence = component.presence {
|
||||
let timestamp = CFAbsoluteTimeGetCurrent() + NSTimeIntervalSince1970
|
||||
labelData = stringAndActivityForUserPresence(strings: component.strings, dateTimeFormat: PresentationDateTimeFormat(), presence: presence, relativeTo: Int32(timestamp))
|
||||
} else if let subtitle = component.subtitle {
|
||||
labelData = (subtitle, false)
|
||||
} else {
|
||||
labelData = ("", false)
|
||||
}
|
||||
|
||||
let labelSize = self.label.update(
|
||||
transition: .immediate,
|
||||
component: AnyComponent(MultilineTextComponent(
|
||||
|
@ -1948,7 +1948,7 @@ public final class StoryItemSetContainerComponent: Component {
})))

if component.slice.item.storyItem.isPublic && (component.slice.peer.addressName != nil || !component.slice.peer._asPeer().usernames.isEmpty) {
items.append(.action(ContextMenuActionItem(text: "Copy link", icon: { theme in
items.append(.action(ContextMenuActionItem(text: "Copy Link", icon: { theme in
return generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/Link"), color: theme.contextMenu.primaryColor)
}, action: { [weak self] _, a in
a(.default)
@ -2819,7 +2819,7 @@ public final class StoryItemSetContainerComponent: Component {
}
return .single(nil)
|> then(
.single(.video(symlinkPath, nil, nil, nil, PixelDimensions(width: 720, height: 1280), duration ?? 0.0, [], .bottomRight))
.single(.video(symlinkPath, nil, false, nil, nil, PixelDimensions(width: 720, height: 1280), duration ?? 0.0, [], .bottomRight))
|> delay(0.1, queue: Queue.mainQueue())
)
}
@ -219,6 +219,8 @@ public func fetchVideoLibraryMediaResource(account: Account, resource: VideoLibr
|
||||
|
||||
let alreadyReceivedAsset = Atomic<Bool>(value: false)
|
||||
if asset.mediaType == .image {
|
||||
Logger.shared.log("FetchVideoResource", "Getting asset image \(asset.localIdentifier)")
|
||||
|
||||
let options = PHImageRequestOptions()
|
||||
options.isNetworkAccessAllowed = true
|
||||
options.deliveryMode = .highQualityFormat
|
||||
@ -230,6 +232,8 @@ public func fetchVideoLibraryMediaResource(account: Account, resource: VideoLibr
|
||||
return
|
||||
}
|
||||
|
||||
Logger.shared.log("FetchVideoResource", "Got asset image \(asset.localIdentifier)")
|
||||
|
||||
var mediaEditorValues: MediaEditorValues?
|
||||
if case let .compress(adjustmentsValue) = resource.conversion, let adjustmentsValue, adjustmentsValue.isStory {
|
||||
if let values = try? JSONDecoder().decode(MediaEditorValues.self, from: adjustmentsValue.data.makeData()) {
|
||||
@ -241,10 +245,12 @@ public func fetchVideoLibraryMediaResource(account: Account, resource: VideoLibr
|
||||
let tempFile = EngineTempBox.shared.tempFile(fileName: "video.mp4")
|
||||
let updatedSize = Atomic<Int64>(value: 0)
|
||||
if let mediaEditorValues {
|
||||
Logger.shared.log("FetchVideoResource", "Requesting video export")
|
||||
|
||||
let configuration = recommendedVideoExportConfiguration(values: mediaEditorValues, frameRate: 30.0)
|
||||
let videoExport = MediaEditorVideoExport(account: account, subject: .image(image), configuration: configuration, outputPath: tempFile.path)
|
||||
videoExport.start()
|
||||
|
||||
|
||||
let statusDisposable = videoExport.status.start(next: { status in
|
||||
switch status {
|
||||
case .completed:
|
||||
|
@ -419,7 +419,7 @@ private final class PeerInfoScreenLabeledValueItemNode: PeerInfoScreenItemNode {
if enabledEntities.isEmpty {
return NSAttributedString(string: text, font: Font.regular(17.0), textColor: textColorValue)
} else {
let fontSize: CGFloat = 17.0
let fontSize: CGFloat = 16.0

let baseFont = Font.regular(fontSize)
let linkFont = baseFont
@ -125,6 +125,14 @@ public final class TelegramRootController: NavigationController, TelegramRootCon
|
||||
self.applicationInFocusDisposable?.dispose()
|
||||
}
|
||||
|
||||
public func getContactsController() -> ViewController? {
|
||||
return self.contactsController
|
||||
}
|
||||
|
||||
public func getChatsController() -> ViewController? {
|
||||
return self.chatListController
|
||||
}
|
||||
|
||||
override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) {
|
||||
let needsRootWallpaperBackgroundNode: Bool
|
||||
if case .regular = layout.metrics.widthClass {
|
||||
@ -303,10 +311,10 @@ public final class TelegramRootController: NavigationController, TelegramRootCon
|
||||
switch value {
|
||||
case .pendingImage:
|
||||
return nil
|
||||
case let .image(image, additionalImage, pipPosition):
|
||||
return .image(image, PixelDimensions(image.size), additionalImage, editorPIPPosition(pipPosition))
|
||||
case let .video(path, transitionImage, additionalPath, additionalTransitionImage, dimensions, duration, positionChangeTimestamps, pipPosition):
|
||||
return .video(path, transitionImage, additionalPath, additionalTransitionImage, dimensions, duration, positionChangeTimestamps, editorPIPPosition(pipPosition))
|
||||
case let .image(image):
|
||||
return .image(image.image, PixelDimensions(image.image.size), image.additionalImage, editorPIPPosition(image.additionalImagePosition))
|
||||
case let .video(video):
|
||||
return .video(video.videoPath, video.coverImage, video.mirror, video.additionalVideoPath, video.additionalCoverImage, video.dimensions, video.duration, video.positionChangeTimestamps, editorPIPPosition(video.additionalVideoPosition))
|
||||
case let .asset(asset):
|
||||
return .asset(asset)
|
||||
case let .draft(draft):
|