diff --git a/Telegram/BroadcastUpload/BroadcastUploadExtension.swift b/Telegram/BroadcastUpload/BroadcastUploadExtension.swift
index 79733d96b9..e2ea000ca3 100644
--- a/Telegram/BroadcastUpload/BroadcastUploadExtension.swift
+++ b/Telegram/BroadcastUpload/BroadcastUploadExtension.swift
@@ -336,7 +336,7 @@ private final class EmbeddedBroadcastUploadImpl: BroadcastUploadImpl {
         let logsPath = rootPath + "/logs/broadcast-logs"
         let _ = try? FileManager.default.createDirectory(atPath: logsPath, withIntermediateDirectories: true, attributes: nil)
         
-        let embeddedBroadcastImplementationTypePath = rootPath + "/broadcast-coordination-type"
+        let embeddedBroadcastImplementationTypePath = rootPath + "/broadcast-coordination-type-v2"
         
         var useIPCContext = false
         if let typeData = try? Data(contentsOf: URL(fileURLWithPath: embeddedBroadcastImplementationTypePath)), let type = String(data: typeData, encoding: .utf8) {
diff --git a/Tests/CallUITest/Sources/ViewController.swift b/Tests/CallUITest/Sources/ViewController.swift
index 3178bdc66b..40e0e609fc 100644
--- a/Tests/CallUITest/Sources/ViewController.swift
+++ b/Tests/CallUITest/Sources/ViewController.swift
@@ -35,7 +35,8 @@ public final class ViewController: UIViewController {
         isRemoteAudioMuted: false,
         localVideo: nil,
         remoteVideo: nil,
-        isRemoteBatteryLow: false
+        isRemoteBatteryLow: false,
+        enableVideoSharpening: false
     )
     
     private var currentLayout: (size: CGSize, insets: UIEdgeInsets)?
diff --git a/submodules/TelegramCallsUI/Sources/CallControllerNodeV2.swift b/submodules/TelegramCallsUI/Sources/CallControllerNodeV2.swift
index f66ebcf25b..db19e00c36 100644
--- a/submodules/TelegramCallsUI/Sources/CallControllerNodeV2.swift
+++ b/submodules/TelegramCallsUI/Sources/CallControllerNodeV2.swift
@@ -166,6 +166,11 @@ final class CallControllerNodeV2: ViewControllerTracingNode, CallControllerNodeP
             }
             self.conferenceAddParticipant?()
         }
+        
+        var enableVideoSharpening = true
+        if let data = call.context.currentAppConfiguration.with({ $0 }).data, let value = data["ios_call_video_sharpening"] as? Double {
+            enableVideoSharpening = value != 0.0
+        }
         
         self.callScreenState = PrivateCallScreen.State(
             strings: presentationData.strings,
@@ -180,7 +185,8 @@ final class CallControllerNodeV2: ViewControllerTracingNode, CallControllerNodeP
             remoteVideo: nil,
             isRemoteBatteryLow: false,
             isEnergySavingEnabled: !self.sharedContext.energyUsageSettings.fullTranslucency,
-            isConferencePossible: false
+            isConferencePossible: false,
+            enableVideoSharpening: enableVideoSharpening
         )
         
         self.isMicrophoneMutedDisposable = (call.isMuted
diff --git a/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift b/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift
index 16b967d677..8637bc87fc 100644
--- a/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift
+++ b/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift
@@ -1160,7 +1160,7 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
             useIPCContext = value != 0.0
         }
         
-        let embeddedBroadcastImplementationTypePath = self.accountContext.sharedContext.basePath + "/broadcast-coordination-type"
+        let embeddedBroadcastImplementationTypePath = self.accountContext.sharedContext.basePath + "/broadcast-coordination-type-v2"
         
         let screencastIPCContext: ScreencastIPCContext
         if useIPCContext {
diff --git a/submodules/TelegramCallsUI/Sources/VideoChatEncryptionKeyComponent.swift b/submodules/TelegramCallsUI/Sources/VideoChatEncryptionKeyComponent.swift
index 83abd095f9..edea3aeb9b 100644
--- a/submodules/TelegramCallsUI/Sources/VideoChatEncryptionKeyComponent.swift
+++ b/submodules/TelegramCallsUI/Sources/VideoChatEncryptionKeyComponent.swift
@@ -7,6 +7,7 @@ import BalancedTextComponent
 import TelegramPresentationData
 import CallsEmoji
 import ImageBlur
+import HierarchyTrackingLayer
 
 private final class EmojiContainerView: UIView {
     private let maskImageView: UIImageView?
@@ -207,6 +208,7 @@ private final class EmojiItemComponent: Component {
     }
     
     final class View: UIView {
+        private let hierarchyTrackingLayer: HierarchyTrackingLayer
         private let containerView: EmojiContainerView
        private let measureEmojiView = ComponentView<Empty>()
         private var pendingContainerView: EmojiContainerView?
@@ -219,11 +221,22 @@ private final class EmojiItemComponent: Component {
         private var pendingEmojiValues: [String]?
         
         override init(frame: CGRect) {
+            self.hierarchyTrackingLayer = HierarchyTrackingLayer()
             self.containerView = EmojiContainerView(hasMask: true)
             
             super.init(frame: frame)
             
+            self.layer.addSublayer(self.hierarchyTrackingLayer)
             self.addSubview(self.containerView)
+            
+            self.hierarchyTrackingLayer.isInHierarchyUpdated = { [weak self] value in
+                guard let self else {
+                    return
+                }
+                if value {
+                    self.state?.updated(transition: .immediate)
+                }
+            }
         }
         
         required init?(coder: NSCoder) {
diff --git a/submodules/TelegramCallsUI/Sources/VideoChatExpandedParticipantThumbnailsComponent.swift b/submodules/TelegramCallsUI/Sources/VideoChatExpandedParticipantThumbnailsComponent.swift
index 8729fa24a4..600c22dd9a 100644
--- a/submodules/TelegramCallsUI/Sources/VideoChatExpandedParticipantThumbnailsComponent.swift
+++ b/submodules/TelegramCallsUI/Sources/VideoChatExpandedParticipantThumbnailsComponent.swift
@@ -275,7 +275,7 @@ final class VideoChatParticipantThumbnailComponent: Component {
             if let current = self.videoLayer {
                 videoLayer = current
             } else {
-                videoLayer = PrivateCallVideoLayer()
+                videoLayer = PrivateCallVideoLayer(enableSharpening: false)
                 self.videoLayer = videoLayer
                 self.extractedContainerView.contentView.layer.insertSublayer(videoLayer.blurredLayer, above: videoBackgroundLayer)
                 self.extractedContainerView.contentView.layer.insertSublayer(videoLayer, above: videoLayer.blurredLayer)
diff --git a/submodules/TelegramCallsUI/Sources/VideoChatParticipantVideoComponent.swift b/submodules/TelegramCallsUI/Sources/VideoChatParticipantVideoComponent.swift
index 25835f0790..ec362d6601 100644
--- a/submodules/TelegramCallsUI/Sources/VideoChatParticipantVideoComponent.swift
+++ b/submodules/TelegramCallsUI/Sources/VideoChatParticipantVideoComponent.swift
@@ -51,6 +51,7 @@ final class VideoChatParticipantVideoComponent: Component {
     let contentInsets: UIEdgeInsets
     let controlInsets: UIEdgeInsets
     let interfaceOrientation: UIInterfaceOrientation
+    let enableVideoSharpening: Bool
     let action: (() -> Void)?
     let contextAction: ((EnginePeer, ContextExtractedContentContainingView, ContextGesture) -> Void)?
     let activatePinch: ((PinchSourceContainerNode) -> Void)?
@@ -70,6 +71,7 @@ final class VideoChatParticipantVideoComponent: Component {
         contentInsets: UIEdgeInsets,
         controlInsets: UIEdgeInsets,
         interfaceOrientation: UIInterfaceOrientation,
+        enableVideoSharpening: Bool,
         action: (() -> Void)?,
         contextAction: ((EnginePeer, ContextExtractedContentContainingView, ContextGesture) -> Void)?,
         activatePinch: ((PinchSourceContainerNode) -> Void)?,
@@ -88,6 +90,7 @@ final class VideoChatParticipantVideoComponent: Component {
         self.contentInsets = contentInsets
         self.controlInsets = controlInsets
         self.interfaceOrientation = interfaceOrientation
+        self.enableVideoSharpening = enableVideoSharpening
         self.action = action
         self.contextAction = contextAction
         self.activatePinch = activatePinch
@@ -128,6 +131,9 @@ final class VideoChatParticipantVideoComponent: Component {
         if lhs.interfaceOrientation != rhs.interfaceOrientation {
             return false
         }
+        if lhs.enableVideoSharpening != rhs.enableVideoSharpening {
+            return false
+        }
         if (lhs.action == nil) != (rhs.action == nil) {
             return false
         }
@@ -525,7 +531,7 @@ final class VideoChatParticipantVideoComponent: Component {
                     resetVideoSource = true
                 }
             } else {
-                videoLayer = PrivateCallVideoLayer()
+                videoLayer = PrivateCallVideoLayer(enableSharpening: component.enableVideoSharpening)
                 self.videoLayer = videoLayer
                 videoLayer.opacity = 0.0
                 self.pinchContainerNode.contentNode.view.layer.insertSublayer(videoLayer.blurredLayer, above: videoBackgroundLayer)
diff --git a/submodules/TelegramCallsUI/Sources/VideoChatParticipantsComponent.swift b/submodules/TelegramCallsUI/Sources/VideoChatParticipantsComponent.swift
index 54bc0d4a64..7a3eb86cd7 100644
--- a/submodules/TelegramCallsUI/Sources/VideoChatParticipantsComponent.swift
+++ b/submodules/TelegramCallsUI/Sources/VideoChatParticipantsComponent.swift
@@ -152,6 +152,7 @@ final class VideoChatParticipantsComponent: Component {
     let expandedInsets: UIEdgeInsets
     let safeInsets: UIEdgeInsets
     let interfaceOrientation: UIInterfaceOrientation
+    let enableVideoSharpening: Bool
     let openParticipantContextMenu: (EnginePeer.Id, ContextExtractedContentContainingView, ContextGesture?) -> Void
     let openInvitedParticipantContextMenu: (EnginePeer.Id, ContextExtractedContentContainingView, ContextGesture?) -> Void
     let updateMainParticipant: (VideoParticipantKey?, Bool?) -> Void
@@ -173,6 +174,7 @@ final class VideoChatParticipantsComponent: Component {
         expandedInsets: UIEdgeInsets,
         safeInsets: UIEdgeInsets,
         interfaceOrientation: UIInterfaceOrientation,
+        enableVideoSharpening: Bool,
         openParticipantContextMenu: @escaping (EnginePeer.Id, ContextExtractedContentContainingView, ContextGesture?) -> Void,
         openInvitedParticipantContextMenu: @escaping (EnginePeer.Id, ContextExtractedContentContainingView, ContextGesture?) -> Void,
         updateMainParticipant: @escaping (VideoParticipantKey?, Bool?) -> Void,
@@ -193,6 +195,7 @@ final class VideoChatParticipantsComponent: Component {
         self.expandedInsets = expandedInsets
         self.safeInsets = safeInsets
         self.interfaceOrientation = interfaceOrientation
+        self.enableVideoSharpening = enableVideoSharpening
         self.openParticipantContextMenu = openParticipantContextMenu
         self.openInvitedParticipantContextMenu = openInvitedParticipantContextMenu
         self.updateMainParticipant = updateMainParticipant
@@ -239,6 +242,9 @@ final class VideoChatParticipantsComponent: Component {
         if lhs.interfaceOrientation != rhs.interfaceOrientation {
             return false
         }
+        if lhs.enableVideoSharpening != rhs.enableVideoSharpening {
+            return false
+        }
         
         return true
     }
@@ -1074,6 +1080,7 @@ final class VideoChatParticipantsComponent: Component {
                     contentInsets: itemContentInsets,
                     controlInsets: itemControlInsets,
                     interfaceOrientation: component.interfaceOrientation,
+                    enableVideoSharpening: component.enableVideoSharpening,
                     action: { [weak self] in
                         guard let self, let component = self.component else {
                             return
diff --git a/submodules/TelegramCallsUI/Sources/VideoChatScreen.swift b/submodules/TelegramCallsUI/Sources/VideoChatScreen.swift
index 3f2216a746..d65a9c0fdd 100644
--- a/submodules/TelegramCallsUI/Sources/VideoChatScreen.swift
+++ b/submodules/TelegramCallsUI/Sources/VideoChatScreen.swift
@@ -234,6 +234,8 @@ final class VideoChatScreenComponent: Component {
         let participants = ComponentView<Empty>()
         var scheduleInfo: ComponentView<Empty>?
+        
+        var enableVideoSharpening: Bool = false
         
         var reconnectedAsEventsDisposable: Disposable?
         var memberEventsDisposable: Disposable?
@@ -1244,6 +1246,11 @@ final class VideoChatScreenComponent: Component {
                 self.invitedPeers.removeAll(where: { invitedPeer in members.participants.contains(where: { $0.id == .peer(invitedPeer.peer.id) }) })
             }
             self.callState = component.initialData.callState
+            
+            self.enableVideoSharpening = true
+            if let data = component.initialCall.accountContext.currentAppConfiguration.with({ $0 }).data, let value = data["ios_call_video_sharpening"] as? Double {
+                self.enableVideoSharpening = value != 0.0
+            }
         }
         
         var call: VideoChatCall
@@ -1359,7 +1366,7 @@ final class VideoChatScreenComponent: Component {
                     return false
                 }
                 if participant.videoDescription != nil || participant.presentationDescription != nil {
-                    if let participantPeer = participant.peer, members.speakingParticipants.contains(participantPeer.id) {
+                    if let participantPeer = participant.peer, participantPeer.id != groupCall.accountContext.account.peerId, members.speakingParticipants.contains(participantPeer.id) {
                         return true
                     }
                 }
@@ -1421,7 +1428,7 @@ final class VideoChatScreenComponent: Component {
                 var speakingParticipantPeers: [EnginePeer] = []
                 if let members, !members.speakingParticipants.isEmpty {
                     for participant in members.participants {
-                        if let participantPeer = participant.peer, members.speakingParticipants.contains(participantPeer.id) {
+                        if let participantPeer = participant.peer, participantPeer.id != groupCall.accountContext.account.peerId, members.speakingParticipants.contains(participantPeer.id) {
                             speakingParticipantPeers.append(participantPeer)
                         }
                     }
@@ -1698,7 +1705,7 @@ final class VideoChatScreenComponent: Component {
                     return false
                 }
                 if participant.videoDescription != nil || participant.presentationDescription != nil {
-                    if let participantPeer = participant.peer, members.speakingParticipants.contains(participantPeer.id) {
+                    if let participantPeer = participant.peer, participantPeer.id != conferenceSource.context.account.peerId, members.speakingParticipants.contains(participantPeer.id) {
                         return true
                     }
                 }
@@ -1760,7 +1767,7 @@ final class VideoChatScreenComponent: Component {
                 var speakingParticipantPeers: [EnginePeer] = []
                 if !members.speakingParticipants.isEmpty {
                     for participant in members.participants {
-                        if let participantPeer = participant.peer, members.speakingParticipants.contains(participantPeer.id) {
+                        if let participantPeer = participant.peer, participantPeer.id != conferenceSource.context.account.peerId, members.speakingParticipants.contains(participantPeer.id) {
                             speakingParticipantPeers.append(participantPeer)
                         }
                     }
@@ -2501,6 +2508,7 @@ final class VideoChatScreenComponent: Component {
                     expandedInsets: participantsExpandedInsets,
                     safeInsets: participantsSafeInsets,
                     interfaceOrientation: environment.orientation ?? .portrait,
+                    enableVideoSharpening: self.enableVideoSharpening,
                     openParticipantContextMenu: { [weak self] id, sourceView, gesture in
                         guard let self else {
                             return
diff --git a/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/PrivateCallVideoLayer.swift b/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/PrivateCallVideoLayer.swift
index 7a4cce7027..5a954acc94 100644
--- a/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/PrivateCallVideoLayer.swift
+++ b/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/PrivateCallVideoLayer.swift
@@ -5,6 +5,21 @@ import MetalPerformanceShaders
 import Accelerate
 import MetalEngine
 
+private func makeSharpenKernel(device: MTLDevice, sharpeningStrength: Float) -> MPSImageConvolution {
+    let centerWeight = 1.0 + 6.0 * sharpeningStrength
+    let adjacentWeight = -1.0 * sharpeningStrength
+    let diagonalWeight = -0.5 * sharpeningStrength
+    
+    let sharpenWeights: [Float] = [
+        diagonalWeight, adjacentWeight, diagonalWeight,
+        adjacentWeight, centerWeight, adjacentWeight,
+        diagonalWeight, adjacentWeight, diagonalWeight
+    ]
+    let result = MPSImageConvolution(device: device, kernelWidth: 3, kernelHeight: 3, weights: sharpenWeights)
+    result.edgeMode = .clamp
+    return result
+}
+
 public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSubject {
     public var internalData: MetalEngineSubjectInternalData?
@@ -16,6 +31,9 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
         let computePipelineStateHorizontal: MTLComputePipelineState
         let computePipelineStateVertical: MTLComputePipelineState
         let downscaleKernel: MPSImageBilinearScale
+        
+        var sharpeningStrength: Float = 0.0
+        var sharpenKernel: MPSImageConvolution
         
         required init?(device: MTLDevice) {
             guard let library = metalLibrary(device: device) else {
@@ -52,6 +70,14 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
             self.computePipelineStateVertical = computePipelineStateVertical
             
             self.downscaleKernel = MPSImageBilinearScale(device: device)
+            
+            self.sharpeningStrength = 1.4
+            self.sharpenKernel = makeSharpenKernel(device: device, sharpeningStrength: self.sharpeningStrength)
+        }
+        
+        func updateSharpeningStrength(device: MTLDevice, sharpeningStrength: Float) {
+            self.sharpeningStrength = sharpeningStrength
+            self.sharpenKernel = makeSharpenKernel(device: device, sharpeningStrength: self.sharpeningStrength)
         }
     }
@@ -82,21 +108,26 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
             self.setNeedsUpdate()
         }
     }
+    
+    private let enableSharpening: Bool
     
     public var renderSpec: RenderLayerSpec?
     
     private var rgbaTexture: PooledTexture?
+    private var sharpenedTexture: PooledTexture?
     private var downscaledTexture: PooledTexture?
     private var blurredHorizontalTexture: PooledTexture?
     private var blurredVerticalTexture: PooledTexture?
     
-    override public init() {
+    public init(enableSharpening: Bool) {
+        self.enableSharpening = enableSharpening
+        
         self.blurredLayer = MetalEngineSubjectLayer()
         
         super.init()
     }
     
     override public init(layer: Any) {
+        self.enableSharpening = false
+        
         self.blurredLayer = MetalEngineSubjectLayer()
         
         super.init(layer: layer)
@@ -121,6 +152,9 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
         if self.rgbaTexture == nil || self.rgbaTexture?.spec != rgbaTextureSpec {
             self.rgbaTexture = MetalEngine.shared.pooledTexture(spec: rgbaTextureSpec)
         }
+        if self.sharpenedTexture == nil || self.sharpenedTexture?.spec != rgbaTextureSpec {
+            self.sharpenedTexture = MetalEngine.shared.pooledTexture(spec: rgbaTextureSpec)
+        }
         if self.downscaledTexture == nil {
             self.downscaledTexture = MetalEngine.shared.pooledTexture(spec: TextureSpec(width: 128, height: 128, pixelFormat: .rgba8UnsignedNormalized))
         }
@@ -134,35 +168,90 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
         guard let rgbaTexture = self.rgbaTexture?.get(context: context) else {
             return
         }
+        
+        var outputTexture = rgbaTexture
+        
+        var sharpenedTexture: TexturePlaceholder?
+        if self.enableSharpening && rgbaTextureSpec.width * rgbaTextureSpec.height >= 800 * 480 {
+            sharpenedTexture = self.sharpenedTexture?.get(context: context)
+            if let sharpenedTexture {
+                outputTexture = sharpenedTexture
+            }
+        }
         
-        let _ = context.compute(state: BlurState.self, inputs: rgbaTexture.placeholer, commands: { commandBuffer, blurState, rgbaTexture in
-            guard let rgbaTexture else {
-                return
-            }
-            guard let computeEncoder = commandBuffer.makeComputeCommandEncoder() else {
-                return
-            }
-            
-            let threadgroupSize = MTLSize(width: 16, height: 16, depth: 1)
-            let threadgroupCount = MTLSize(width: (rgbaTexture.width + threadgroupSize.width - 1) / threadgroupSize.width, height: (rgbaTexture.height + threadgroupSize.height - 1) / threadgroupSize.height, depth: 1)
-            
-            switch videoTextures.textureLayout {
-            case let .biPlanar(biPlanar):
-                computeEncoder.setComputePipelineState(blurState.computePipelineStateYUVBiPlanarToRGBA)
-                computeEncoder.setTexture(biPlanar.y, index: 0)
-                computeEncoder.setTexture(biPlanar.uv, index: 1)
-                computeEncoder.setTexture(rgbaTexture, index: 2)
-            case let .triPlanar(triPlanar):
-                computeEncoder.setComputePipelineState(blurState.computePipelineStateYUVTriPlanarToRGBA)
-                computeEncoder.setTexture(triPlanar.y, index: 0)
-                computeEncoder.setTexture(triPlanar.u, index: 1)
-                computeEncoder.setTexture(triPlanar.u, index: 2)
-                computeEncoder.setTexture(rgbaTexture, index: 3)
-            }
-            computeEncoder.dispatchThreadgroups(threadgroupCount, threadsPerThreadgroup: threadgroupSize)
-            
-            computeEncoder.endEncoding()
-        })
+        if let sharpenedTexture {
+            let _ = context.compute(state: BlurState.self, inputs: rgbaTexture.placeholer, sharpenedTexture.placeholer, commands: { commandBuffer, blurState, rgbaTexture, sharpenedTexture in
+                guard let rgbaTexture else {
+                    return
+                }
+                guard let sharpenedTexture else {
+                    return
+                }
+                
+                do {
+                    guard let computeEncoder = commandBuffer.makeComputeCommandEncoder() else {
+                        return
+                    }
+                    
+                    let threadgroupSize = MTLSize(width: 16, height: 16, depth: 1)
+                    let threadgroupCount = MTLSize(width: (rgbaTexture.width + threadgroupSize.width - 1) / threadgroupSize.width, height: (rgbaTexture.height + threadgroupSize.height - 1) / threadgroupSize.height, depth: 1)
+                    
+                    switch videoTextures.textureLayout {
+                    case let .biPlanar(biPlanar):
+                        computeEncoder.setComputePipelineState(blurState.computePipelineStateYUVBiPlanarToRGBA)
+                        computeEncoder.setTexture(biPlanar.y, index: 0)
+                        computeEncoder.setTexture(biPlanar.uv, index: 1)
+                        computeEncoder.setTexture(rgbaTexture, index: 2)
+                    case let .triPlanar(triPlanar):
+                        computeEncoder.setComputePipelineState(blurState.computePipelineStateYUVTriPlanarToRGBA)
+                        computeEncoder.setTexture(triPlanar.y, index: 0)
+                        computeEncoder.setTexture(triPlanar.u, index: 1)
+                        computeEncoder.setTexture(triPlanar.u, index: 2)
+                        computeEncoder.setTexture(rgbaTexture, index: 3)
+                    }
+                    computeEncoder.dispatchThreadgroups(threadgroupCount, threadsPerThreadgroup: threadgroupSize)
+                    
+                    computeEncoder.endEncoding()
+                }
+                
+                do {
+                    blurState.sharpenKernel.encode(commandBuffer: commandBuffer, sourceTexture: rgbaTexture, destinationTexture: sharpenedTexture)
+                }
+            })
+        } else {
+            let _ = context.compute(state: BlurState.self, inputs: rgbaTexture.placeholer, commands: { commandBuffer, blurState, rgbaTexture in
+                guard let rgbaTexture else {
+                    return
+                }
+                
+                do {
+                    guard let computeEncoder = commandBuffer.makeComputeCommandEncoder() else {
+                        return
+                    }
+                    
+                    let threadgroupSize = MTLSize(width: 16, height: 16, depth: 1)
+                    let threadgroupCount = MTLSize(width: (rgbaTexture.width + threadgroupSize.width - 1) / threadgroupSize.width, height: (rgbaTexture.height + threadgroupSize.height - 1) / threadgroupSize.height, depth: 1)
+                    
+                    switch videoTextures.textureLayout {
+                    case let .biPlanar(biPlanar):
+                        computeEncoder.setComputePipelineState(blurState.computePipelineStateYUVBiPlanarToRGBA)
+                        computeEncoder.setTexture(biPlanar.y, index: 0)
+                        computeEncoder.setTexture(biPlanar.uv, index: 1)
+                        computeEncoder.setTexture(rgbaTexture, index: 2)
+                    case let .triPlanar(triPlanar):
+                        computeEncoder.setComputePipelineState(blurState.computePipelineStateYUVTriPlanarToRGBA)
+                        computeEncoder.setTexture(triPlanar.y, index: 0)
+                        computeEncoder.setTexture(triPlanar.u, index: 1)
+                        computeEncoder.setTexture(triPlanar.u, index: 2)
+                        computeEncoder.setTexture(rgbaTexture, index: 3)
+                    }
+                    computeEncoder.dispatchThreadgroups(threadgroupCount, threadsPerThreadgroup: threadgroupSize)
+                    
+                    computeEncoder.endEncoding()
+                }
+            })
+        }
         
         if !self.blurredLayer.isHidden {
             guard let downscaledTexture = self.downscaledTexture?.get(context: context), let blurredHorizontalTexture = self.blurredHorizontalTexture?.get(context: context), let blurredVerticalTexture = self.blurredVerticalTexture?.get(context: context) else {
@@ -228,8 +317,8 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
             })
         }
         
-        context.renderToLayer(spec: renderSpec, state: RenderState.self, layer: self, inputs: rgbaTexture.placeholer, commands: { encoder, placement, rgbaTexture in
-            guard let rgbaTexture else {
+        context.renderToLayer(spec: renderSpec, state: RenderState.self, layer: self, inputs: outputTexture.placeholer, commands: { encoder, placement, outputTexture in
+            guard let outputTexture else {
                 return
             }
@@ -244,7 +333,7 @@ public final class PrivateCallVideoLayer: MetalEngineSubjectLayer, MetalEngineSu
             )
             encoder.setVertexBytes(&mirror, length: 2 * 4, index: 1)
             
-            encoder.setFragmentTexture(rgbaTexture, index: 0)
+            encoder.setFragmentTexture(outputTexture, index: 0)
             
             var brightness: Float = 1.0
             var saturation: Float = 1.0
diff --git a/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/VideoContainerView.swift b/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/VideoContainerView.swift
index 4eb3db1ff5..f444cf3182 100644
--- a/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/VideoContainerView.swift
+++ b/submodules/TelegramUI/Components/Calls/CallScreen/Sources/Components/VideoContainerView.swift
@@ -128,6 +128,7 @@ final class VideoContainerView: HighlightTrackingButton {
     }
     
     let key: Key
+    let enableSharpening: Bool
     let videoContainerLayer: VideoContainerLayer
     var videoContainerLayerTaken: Bool = false
@@ -211,8 +212,9 @@ final class VideoContainerView: HighlightTrackingButton {
    
     var pressAction: (() -> Void)?
     
-    init(key: Key) {
+    init(key: Key, enableSharpening: Bool) {
         self.key = key
+        self.enableSharpening = enableSharpening
         
         self.videoContainerLayer = VideoContainerLayer()
         self.videoContainerLayer.backgroundColor = nil
@@ -223,7 +225,7 @@ final class VideoContainerView: HighlightTrackingButton {
             self.videoContainerLayer.contentsLayer.cornerCurve = .circular
         }
         
-        self.videoLayer = PrivateCallVideoLayer()
+        self.videoLayer = PrivateCallVideoLayer(enableSharpening: self.enableSharpening)
         self.videoLayer.masksToBounds = true
         self.videoLayer.isDoubleSided = false
         if #available(iOS 13.0, *) {
@@ -454,7 +456,7 @@ final class VideoContainerView: HighlightTrackingButton {
            
             let previousVideoLayer = self.videoLayer
             self.disappearingVideoLayer = DisappearingVideo(flipAnimationInfo: flipAnimationInfo, videoLayer: self.videoLayer, videoMetrics: videoMetrics)
             
-            self.videoLayer = PrivateCallVideoLayer()
+            self.videoLayer = PrivateCallVideoLayer(enableSharpening: self.enableSharpening)
             self.videoLayer.opacity = previousVideoLayer.opacity
             self.videoLayer.masksToBounds = true
             self.videoLayer.isDoubleSided = false
diff --git a/submodules/TelegramUI/Components/Calls/CallScreen/Sources/PrivateCallScreen.swift b/submodules/TelegramUI/Components/Calls/CallScreen/Sources/PrivateCallScreen.swift
index 3927200df2..86d49a667c 100644
--- a/submodules/TelegramUI/Components/Calls/CallScreen/Sources/PrivateCallScreen.swift
+++ b/submodules/TelegramUI/Components/Calls/CallScreen/Sources/PrivateCallScreen.swift
@@ -81,6 +81,7 @@ public final class PrivateCallScreen: OverlayMaskContainerView, AVPictureInPictu
         public var isRemoteBatteryLow: Bool
         public var isEnergySavingEnabled: Bool
         public var isConferencePossible: Bool
+        public var enableVideoSharpening: Bool
         
         public init(
             strings: PresentationStrings,
@@ -95,7 +96,8 @@ public final class PrivateCallScreen: OverlayMaskContainerView, AVPictureInPictu
             remoteVideo: VideoSource?,
             isRemoteBatteryLow: Bool,
             isEnergySavingEnabled: Bool,
-            isConferencePossible: Bool
+            isConferencePossible: Bool,
+            enableVideoSharpening: Bool
         ) {
             self.strings = strings
             self.lifecycleState = lifecycleState
@@ -110,6 +112,7 @@ public final class PrivateCallScreen: OverlayMaskContainerView, AVPictureInPictu
             self.isRemoteBatteryLow = isRemoteBatteryLow
             self.isEnergySavingEnabled = isEnergySavingEnabled
             self.isConferencePossible = isConferencePossible
+            self.enableVideoSharpening = enableVideoSharpening
         }
         
         public static func ==(lhs: State, rhs: State) -> Bool {
@@ -152,6 +155,9 @@ public final class PrivateCallScreen: OverlayMaskContainerView, AVPictureInPictu
             if lhs.isConferencePossible != rhs.isConferencePossible {
                 return false
             }
+            if lhs.enableVideoSharpening != rhs.enableVideoSharpening {
+                return false
+            }
             return true
         }
     }
@@ -994,7 +1000,7 @@ public final class PrivateCallScreen: OverlayMaskContainerView, AVPictureInPictu
                 videoContainerView = current
             } else {
                 animateIn = true
-                videoContainerView = VideoContainerView(key: videoContainerKey)
+                videoContainerView = VideoContainerView(key: videoContainerKey, enableSharpening: params.state.enableVideoSharpening)
                 switch videoContainerKey {
                 case .foreground:
                     self.overlayContentsView.layer.addSublayer(videoContainerView.blurredContainerLayer)
diff --git a/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm b/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
index 00025f77a2..1ceee49216 100644
--- a/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
+++ b/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
@@ -1631,7 +1631,6 @@ static void (*InternalVoipLoggingFunction)(NSString *) = NULL;
     static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
         tgcalls::Register<tgcalls::InstanceImpl>();
-        //tgcalls::Register<tgcalls::InstanceImplLegacy>();
         tgcalls::Register<tgcalls::InstanceV2Impl>();
         tgcalls::Register<tgcalls::InstanceV2ReferenceImpl>();
     });
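
For reference only, not part of the patch: a minimal standalone sketch of the 3x3 sharpen kernel that makeSharpenKernel builds in PrivateCallVideoLayer.swift above. The centre tap 1 + 6s is balanced by four adjacent taps of -s and four diagonal taps of -0.5s, so the weights always sum to 1 (brightness-preserving) and a strength of 0 reduces to the identity kernel. The helper name below is hypothetical and drops the MPSImageConvolution wrapper so it can run anywhere Swift does.

func sharpenWeights(strength s: Float) -> [Float] {
    // Centre is boosted; neighbours subtract the same total amount back out.
    let center = 1.0 + 6.0 * s
    let adjacent = -1.0 * s
    let diagonal = -0.5 * s
    return [
        diagonal, adjacent, diagonal,
        adjacent, center,   adjacent,
        diagonal, adjacent, diagonal
    ]
}

// For any strength the weights sum to 1: 1 + 6s - 4s - 2s == 1.
// Checked here with the patch's default strength of 1.4:
assert(abs(sharpenWeights(strength: 1.4).reduce(0, +) - 1.0) < 0.001)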