Implement Metal rendering
This commit is contained in:
parent 9941c499c6
commit 8485b10918
@@ -170,7 +170,7 @@ private final class TimeBasedCleanupImpl {
         let generalPaths = self.generalPaths
         let shortLivedPaths = self.shortLivedPaths
         let scanOnce = Signal<Never, NoError> { subscriber in
-            DispatchQueue.global(qos: .utility).async {
+            DispatchQueue.global(qos: .background).async {
                 var removedShortLivedCount: Int = 0
                 var removedGeneralCount: Int = 0
                 var removedGeneralLimitCount: Int = 0
@@ -1,4 +1,44 @@
 load("@build_bazel_rules_swift//swift:swift.bzl", "swift_library")
+load(
+    "@build_bazel_rules_apple//apple:resources.bzl",
+    "apple_resource_bundle",
+    "apple_resource_group",
+)
+load("//build-system/bazel-utils:plist_fragment.bzl",
+    "plist_fragment",
+)
+
+filegroup(
+    name = "TelegramCallsUIMetalResources",
+    srcs = glob([
+        "Resources/**/*.metal",
+    ]),
+    visibility = ["//visibility:public"],
+)
+
+plist_fragment(
+    name = "TelegramCallsUIBundleInfoPlist",
+    extension = "plist",
+    template =
+    """
+    <key>CFBundleIdentifier</key>
+    <string>org.telegram.TelegramCallsUI</string>
+    <key>CFBundleDevelopmentRegion</key>
+    <string>en</string>
+    <key>CFBundleName</key>
+    <string>TelegramCallsUI</string>
+    """
+)
+
+apple_resource_bundle(
+    name = "TelegramCallsUIBundle",
+    infoplists = [
+        ":TelegramCallsUIBundleInfoPlist",
+    ],
+    resources = [
+        ":TelegramCallsUIMetalResources",
+    ],
+)
+
 swift_library(
     name = "TelegramCallsUI",
@@ -6,6 +46,9 @@ swift_library(
     srcs = glob([
        "Sources/**/*.swift",
    ]),
+    data = [
+        ":TelegramCallsUIBundle",
+    ],
    deps = [
        "//submodules/SSignalKit/SwiftSignalKit:SwiftSignalKit",
        "//submodules/Display:Display",
submodules/TelegramCallsUI/Resources/I420VideoShaders.metal (new file, 49 lines)
@@ -0,0 +1,49 @@
#include <metal_stdlib>
using namespace metal;

typedef struct {
    packed_float2 position;
    packed_float2 texcoord;
} Vertex;

typedef struct {
    float4 position[[position]];
    float2 texcoord;
} Varyings;

vertex Varyings i420VertexPassthrough(constant Vertex *verticies[[buffer(0)]],
                                      unsigned int vid[[vertex_id]]) {
    Varyings out;
    constant Vertex &v = verticies[vid];
    out.position = float4(float2(v.position), 0.0, 1.0);
    out.texcoord = v.texcoord;

    return out;
}

fragment half4 i420FragmentColorConversion(
    Varyings in[[stage_in]],
    texture2d<float, access::sample> textureY[[texture(0)]],
    texture2d<float, access::sample> textureU[[texture(1)]],
    texture2d<float, access::sample> textureV[[texture(2)]]) {
    constexpr sampler s(address::clamp_to_edge, filter::linear);
    float y;
    float u;
    float v;
    float r;
    float g;
    float b;
    // Conversion for YUV to rgb from http://www.fourcc.org/fccyvrgb.php
    y = textureY.sample(s, in.texcoord).r;
    u = textureU.sample(s, in.texcoord).r;
    v = textureV.sample(s, in.texcoord).r;
    u = u - 0.5;
    v = v - 0.5;
    r = y + 1.403 * v;
    g = y - 0.344 * u - 0.714 * v;
    b = y + 1.770 * u;

    float4 out = float4(r, g, b, 1.0);

    return half4(out);
}
submodules/TelegramCallsUI/Resources/NV12VideoShaders.metal (new file, 57 lines)
@@ -0,0 +1,57 @@
#include <metal_stdlib>
using namespace metal;

typedef struct {
    packed_float2 position;
    packed_float2 texcoord;
} Vertex;

typedef struct {
    float4 position[[position]];
    float2 texcoord;
} Varyings;

vertex Varyings nv12VertexPassthrough(
    constant Vertex *verticies[[buffer(0)]],
    unsigned int vid[[vertex_id]]
) {
    Varyings out;
    constant Vertex &v = verticies[vid];
    out.position = float4(float2(v.position), 0.0, 1.0);
    out.texcoord = v.texcoord;
    return out;
}

float4 samplePoint(texture2d<float, access::sample> textureY, texture2d<float, access::sample> textureCbCr, sampler s, float2 texcoord) {
    float y;
    float2 uv;
    y = textureY.sample(s, texcoord).r;
    uv = textureCbCr.sample(s, texcoord).rg - float2(0.5, 0.5);

    // Conversion for YUV to rgb from http://www.fourcc.org/fccyvrgb.php
    float4 out = float4(y + 1.403 * uv.y, y - 0.344 * uv.x - 0.714 * uv.y, y + 1.770 * uv.x, 1.0);
    return out;
}

fragment half4 nv12FragmentColorConversion(
    Varyings in[[stage_in]],
    texture2d<float, access::sample> textureY[[texture(0)]],
    texture2d<float, access::sample> textureCbCr[[texture(1)]]
) {
    constexpr sampler s(address::clamp_to_edge, filter::linear);

    float4 out = samplePoint(textureY, textureCbCr, s, in.texcoord);

    return half4(out);
}

fragment half4 blitFragmentColorConversion(
    Varyings in[[stage_in]],
    texture2d<float, access::sample> texture[[texture(0)]]
) {
    constexpr sampler s(address::clamp_to_edge, filter::linear);

    float4 out = texture.sample(s, in.texcoord);

    return half4(out);
}
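For reference, both fragment shaders above apply the same YUV-to-RGB conversion (the coefficients cited from fourcc.org in the shader comments), with the chroma samples recentered around zero:

    R = Y + 1.403 * (V - 0.5)
    G = Y - 0.344 * (U - 0.5) - 0.714 * (V - 0.5)
    B = Y + 1.770 * (U - 0.5)

The I420 variant reads U and V from separate single-channel planes; the NV12 variant reads the same two values from one interleaved CbCr texture.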
@@ -25,16 +25,17 @@ final class GroupVideoNode: ASDisplayNode {
     let sourceContainerNode: PinchSourceContainerNode
     private let containerNode: ASDisplayNode
     private let videoViewContainer: UIView
-    private let videoView: PresentationCallVideoView
+    private let videoView: VideoRenderingView
 
     private let backdropVideoViewContainer: UIView
-    private let backdropVideoView: PresentationCallVideoView?
+    private let backdropVideoView: VideoRenderingView?
     private var backdropEffectView: UIVisualEffectView?
 
     private var effectView: UIVisualEffectView?
     private var isBlurred: Bool = false
 
     private var isEnabled: Bool = false
+    private var isBlurEnabled: Bool = false
 
     private var validLayout: (CGSize, LayoutMode)?
 
@@ -47,7 +48,7 @@ final class GroupVideoNode: ASDisplayNode {
 
     public var isMainstageExclusive = false
 
-    init(videoView: PresentationCallVideoView, backdropVideoView: PresentationCallVideoView?) {
+    init(videoView: VideoRenderingView, backdropVideoView: VideoRenderingView?) {
         self.sourceContainerNode = PinchSourceContainerNode()
         self.containerNode = ASDisplayNode()
         self.videoViewContainer = UIView()
@@ -61,7 +62,7 @@ final class GroupVideoNode: ASDisplayNode {
         super.init()
 
         if let backdropVideoView = backdropVideoView {
-            self.backdropVideoViewContainer.addSubview(backdropVideoView.view)
+            self.backdropVideoViewContainer.addSubview(backdropVideoView)
             self.view.addSubview(self.backdropVideoViewContainer)
 
             let effect: UIVisualEffect
@@ -70,12 +71,12 @@ final class GroupVideoNode: ASDisplayNode {
             } else {
                 effect = UIBlurEffect(style: .dark)
             }
-            let backdropEffectView = UIVisualEffectView(effect: effect)
-            self.view.addSubview(backdropEffectView)
-            self.backdropEffectView = backdropEffectView
+            //let backdropEffectView = UIVisualEffectView(effect: effect)
+            //self.view.addSubview(backdropEffectView)
+            //self.backdropEffectView = backdropEffectView
         }
 
-        self.videoViewContainer.addSubview(self.videoView.view)
+        self.videoViewContainer.addSubview(self.videoView)
         self.addSubnode(self.sourceContainerNode)
         self.containerNode.view.addSubview(self.videoViewContainer)
         self.sourceContainerNode.contentNode.addSubnode(self.containerNode)
@@ -112,7 +113,7 @@ final class GroupVideoNode: ASDisplayNode {
         self.isEnabled = isEnabled
 
         self.videoView.updateIsEnabled(isEnabled)
-        self.backdropVideoView?.updateIsEnabled(isEnabled)
+        self.backdropVideoView?.updateIsEnabled(isEnabled && self.isBlurEnabled)
     }
 
     func updateIsBlurred(isBlurred: Bool, light: Bool = false, animated: Bool = true) {
@@ -150,11 +151,11 @@ final class GroupVideoNode: ASDisplayNode {
             self.backgroundColor = .black
         }
         var snapshotView: UIView?
-        if let snapshot = self.videoView.view.snapshotView(afterScreenUpdates: false) {
+        if let snapshot = self.videoView.snapshotView(afterScreenUpdates: false) {
             snapshotView = snapshot
-            snapshot.transform = self.videoView.view.transform
-            snapshot.frame = self.videoView.view.frame
-            self.videoView.view.superview?.insertSubview(snapshot, aboveSubview: self.videoView.view)
+            snapshot.transform = self.videoView.transform
+            snapshot.frame = self.videoView.frame
+            self.videoView.superview?.insertSubview(snapshot, aboveSubview: self.videoView)
         }
         UIView.transition(with: withBackground ? self.videoViewContainer : self.view, duration: 0.4, options: [.transitionFlipFromLeft, .curveEaseOut], animations: {
             UIView.performWithoutAnimation {
@@ -282,17 +283,17 @@ final class GroupVideoNode: ASDisplayNode {
         rotatedVideoFrame.size.width = ceil(rotatedVideoFrame.size.width)
         rotatedVideoFrame.size.height = ceil(rotatedVideoFrame.size.height)
 
-        self.videoView.view.alpha = 0.995
+        self.videoView.alpha = 0.995
 
         let normalizedVideoSize = rotatedVideoFrame.size.aspectFilled(CGSize(width: 1080.0, height: 1080.0))
-        transition.updatePosition(layer: self.videoView.view.layer, position: rotatedVideoFrame.center)
-        transition.updateBounds(layer: self.videoView.view.layer, bounds: CGRect(origin: CGPoint(), size: normalizedVideoSize))
+        transition.updatePosition(layer: self.videoView.layer, position: rotatedVideoFrame.center)
+        transition.updateBounds(layer: self.videoView.layer, bounds: CGRect(origin: CGPoint(), size: normalizedVideoSize))
 
         let transformScale: CGFloat = rotatedVideoFrame.width / normalizedVideoSize.width
         transition.updateTransformScale(layer: self.videoViewContainer.layer, scale: transformScale)
 
         if let backdropVideoView = self.backdropVideoView {
-            backdropVideoView.view.alpha = 0.995
+            backdropVideoView.alpha = 0.995
 
             let topFrame = rotatedVideoFrame
 
@@ -303,32 +304,34 @@ final class GroupVideoNode: ASDisplayNode {
             rotatedVideoFrame.size.width = ceil(rotatedVideoFrame.size.width)
             rotatedVideoFrame.size.height = ceil(rotatedVideoFrame.size.height)
 
-            let isBlurEnabled = !topFrame.contains(rotatedVideoFrame)
+            self.isBlurEnabled = !topFrame.contains(rotatedVideoFrame)
 
             let normalizedVideoSize = rotatedVideoFrame.size.aspectFilled(CGSize(width: 1080.0, height: 1080.0))
-            if isBlurEnabled {
-                self.backdropVideoView?.updateIsEnabled(self.isEnabled)
-                self.backdropVideoView?.view.isHidden = false
+            self.backdropVideoView?.updateIsEnabled(self.isEnabled && self.isBlurEnabled)
+
+            if self.isBlurEnabled {
+                self.backdropVideoView?.isHidden = false
                 self.backdropEffectView?.isHidden = false
             }
-            transition.updatePosition(layer: backdropVideoView.view.layer, position: rotatedVideoFrame.center, force: true, completion: { [weak self] value in
+            transition.updatePosition(layer: backdropVideoView.layer, position: rotatedVideoFrame.center, force: true, completion: { [weak self] value in
                 guard let strongSelf = self, value else {
                     return
                 }
-                if !isBlurEnabled {
+                if !strongSelf.isBlurEnabled {
                     strongSelf.backdropVideoView?.updateIsEnabled(false)
-                    strongSelf.backdropVideoView?.view.isHidden = true
+                    strongSelf.backdropVideoView?.isHidden = true
                     strongSelf.backdropEffectView?.isHidden = false
                 }
             })
-            transition.updateBounds(layer: backdropVideoView.view.layer, bounds: CGRect(origin: CGPoint(), size: normalizedVideoSize))
+            transition.updateBounds(layer: backdropVideoView.layer, bounds: CGRect(origin: CGPoint(), size: normalizedVideoSize))
 
             let transformScale: CGFloat = rotatedVideoFrame.width / normalizedVideoSize.width
 
             transition.updateTransformScale(layer: self.backdropVideoViewContainer.layer, scale: transformScale)
 
             let transition: ContainedViewLayoutTransition = .immediate
-            transition.updateTransformRotation(view: backdropVideoView.view, angle: angle)
+            transition.updateTransformRotation(view: backdropVideoView, angle: angle)
         }
 
         if let backdropEffectView = self.backdropEffectView {
@@ -359,7 +362,7 @@ final class GroupVideoNode: ASDisplayNode {
         }
 
         let transition: ContainedViewLayoutTransition = .immediate
-        transition.updateTransformRotation(view: self.videoView.view, angle: angle)
+        transition.updateTransformRotation(view: self.videoView, angle: angle)
     }
 
     var snapshotView: UIView?
submodules/TelegramCallsUI/Sources/MetalVideoRenderingView.swift (new file, 652 lines)
@@ -0,0 +1,652 @@
import Foundation
import UIKit
import AsyncDisplayKit
import Display
import SwiftSignalKit
import AccountContext
import TelegramVoip
import AVFoundation
import Metal
import MetalPerformanceShaders

private func alignUp(size: Int, align: Int) -> Int {
    precondition(((align - 1) & align) == 0, "Align must be a power of two")

    let alignmentMask = align - 1
    return (size + alignmentMask) & ~alignmentMask
}

private func getCubeVertexData(
    cropX: Int,
    cropY: Int,
    cropWidth: Int,
    cropHeight: Int,
    frameWidth: Int,
    frameHeight: Int,
    rotation: Int,
    buffer: UnsafeMutablePointer<Float>
) {
    let cropLeft = Float(cropX) / Float(frameWidth)
    let cropRight = Float(cropX + cropWidth) / Float(frameWidth)
    let cropTop = Float(cropY) / Float(frameHeight)
    let cropBottom = Float(cropY + cropHeight) / Float(frameHeight)

    switch rotation {
    default:
        var values: [Float] = [
            -1.0, -1.0, cropLeft, cropBottom,
            1.0, -1.0, cropRight, cropBottom,
            -1.0, 1.0, cropLeft, cropTop,
            1.0, 1.0, cropRight, cropTop
        ]
        memcpy(buffer, &values, values.count * MemoryLayout.size(ofValue: values[0]));
    }
}

@available(iOS 13.0, *)
private protocol FrameBufferRenderingState {
    var frameSize: CGSize? { get }

    func encode(renderingContext: MetalVideoRenderingContext, vertexBuffer: MTLBuffer, renderEncoder: MTLRenderCommandEncoder) -> Bool
}

@available(iOS 13.0, *)
private final class BlitRenderingState {
    static func encode(renderingContext: MetalVideoRenderingContext, texture: MTLTexture, vertexBuffer: MTLBuffer, renderEncoder: MTLRenderCommandEncoder) -> Bool {
        renderEncoder.setRenderPipelineState(renderingContext.blitPipelineState)

        renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)

        renderEncoder.setFragmentTexture(texture, index: 0)

        renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4, instanceCount: 1)

        return true
    }
}

@available(iOS 13.0, *)
private final class NV12FrameBufferRenderingState: FrameBufferRenderingState {
    private var yTexture: MTLTexture?
    private var uvTexture: MTLTexture?

    var frameSize: CGSize? {
        if let yTexture = self.yTexture {
            return CGSize(width: yTexture.width, height: yTexture.height)
        } else {
            return nil
        }
    }

    func updateTextureBuffers(renderingContext: MetalVideoRenderingContext, frameBuffer: OngoingGroupCallContext.VideoFrameData.NativeBuffer) {
        let pixelBuffer = frameBuffer.pixelBuffer

        var lumaTexture: MTLTexture?
        var chromaTexture: MTLTexture?
        var outTexture: CVMetalTexture?

        let lumaWidth = CVPixelBufferGetWidthOfPlane(pixelBuffer, 0)
        let lumaHeight = CVPixelBufferGetHeightOfPlane(pixelBuffer, 0)

        var indexPlane = 0
        var result = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, renderingContext.textureCache, pixelBuffer, nil, .r8Unorm, lumaWidth, lumaHeight, indexPlane, &outTexture)
        if result == kCVReturnSuccess, let outTexture = outTexture {
            lumaTexture = CVMetalTextureGetTexture(outTexture)
        }
        outTexture = nil

        indexPlane = 1
        result = CVMetalTextureCacheCreateTextureFromImage(kCFAllocatorDefault, renderingContext.textureCache, pixelBuffer, nil, .rg8Unorm, lumaWidth / 2, lumaHeight / 2, indexPlane, &outTexture)
        if result == kCVReturnSuccess, let outTexture = outTexture {
            chromaTexture = CVMetalTextureGetTexture(outTexture)
        }
        outTexture = nil

        if let lumaTexture = lumaTexture, let chromaTexture = chromaTexture {
            self.yTexture = lumaTexture
            self.uvTexture = chromaTexture
        } else {
            self.yTexture = nil
            self.uvTexture = nil
        }
    }

    func encode(renderingContext: MetalVideoRenderingContext, vertexBuffer: MTLBuffer, renderEncoder: MTLRenderCommandEncoder) -> Bool {
        guard let yTexture = self.yTexture, let uvTexture = self.uvTexture else {
            return false
        }

        renderEncoder.setRenderPipelineState(renderingContext.nv12PipelineState)

        renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)

        renderEncoder.setFragmentTexture(yTexture, index: 0)
        renderEncoder.setFragmentTexture(uvTexture, index: 1)

        renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4, instanceCount: 1)

        return true
    }
}

@available(iOS 13.0, *)
private final class I420FrameBufferRenderingState: FrameBufferRenderingState {
    private var yTexture: MTLTexture?
    private var uTexture: MTLTexture?
    private var vTexture: MTLTexture?

    private var lumaTextureDescriptorSize: CGSize?
    private var lumaTextureDescriptor: MTLTextureDescriptor?
    private var chromaTextureDescriptor: MTLTextureDescriptor?

    var frameSize: CGSize? {
        if let yTexture = self.yTexture {
            return CGSize(width: yTexture.width, height: yTexture.height)
        } else {
            return nil
        }
    }

    func updateTextureBuffers(renderingContext: MetalVideoRenderingContext, frameBuffer: OngoingGroupCallContext.VideoFrameData.I420Buffer) {
        let lumaSize = CGSize(width: frameBuffer.width, height: frameBuffer.height)

        if lumaSize != lumaTextureDescriptorSize || lumaTextureDescriptor == nil || chromaTextureDescriptor == nil {
            self.lumaTextureDescriptorSize = lumaSize

            let lumaTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .r8Unorm, width: frameBuffer.width, height: frameBuffer.height, mipmapped: false)
            lumaTextureDescriptor.usage = .shaderRead
            self.lumaTextureDescriptor = lumaTextureDescriptor

            self.yTexture = renderingContext.device.makeTexture(descriptor: lumaTextureDescriptor)

            let chromaTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .r8Unorm, width: frameBuffer.width / 2, height: frameBuffer.height / 2, mipmapped: false)
            chromaTextureDescriptor.usage = .shaderRead
            self.chromaTextureDescriptor = chromaTextureDescriptor

            self.uTexture = renderingContext.device.makeTexture(descriptor: chromaTextureDescriptor)
            self.vTexture = renderingContext.device.makeTexture(descriptor: chromaTextureDescriptor)
        }

        guard let yTexture = self.yTexture, let uTexture = self.uTexture, let vTexture = self.vTexture else {
            return
        }

        frameBuffer.y.withUnsafeBytes { bufferPointer in
            if let baseAddress = bufferPointer.baseAddress {
                yTexture.replace(region: MTLRegionMake2D(0, 0, yTexture.width, yTexture.height), mipmapLevel: 0, withBytes: baseAddress, bytesPerRow: frameBuffer.strideY)
            }
        }

        frameBuffer.u.withUnsafeBytes { bufferPointer in
            if let baseAddress = bufferPointer.baseAddress {
                uTexture.replace(region: MTLRegionMake2D(0, 0, uTexture.width, uTexture.height), mipmapLevel: 0, withBytes: baseAddress, bytesPerRow: frameBuffer.strideU)
            }
        }

        frameBuffer.v.withUnsafeBytes { bufferPointer in
            if let baseAddress = bufferPointer.baseAddress {
                vTexture.replace(region: MTLRegionMake2D(0, 0, vTexture.width, vTexture.height), mipmapLevel: 0, withBytes: baseAddress, bytesPerRow: frameBuffer.strideV)
            }
        }
    }

    func encode(renderingContext: MetalVideoRenderingContext, vertexBuffer: MTLBuffer, renderEncoder: MTLRenderCommandEncoder) -> Bool {
        guard let yTexture = self.yTexture, let uTexture = self.uTexture, let vTexture = self.vTexture else {
            return false
        }

        renderEncoder.setRenderPipelineState(renderingContext.i420PipelineState)

        renderEncoder.setVertexBuffer(vertexBuffer, offset: 0, index: 0)

        renderEncoder.setFragmentTexture(yTexture, index: 0)
        renderEncoder.setFragmentTexture(uTexture, index: 1)
        renderEncoder.setFragmentTexture(vTexture, index: 2)

        renderEncoder.drawPrimitives(type: .triangleStrip, vertexStart: 0, vertexCount: 4, instanceCount: 1)

        return true
    }
}

@available(iOS 13.0, *)
final class MetalVideoRenderingView: UIView, VideoRenderingView {
    static override var layerClass: AnyClass {
        return CAMetalLayer.self
    }

    private var metalLayer: CAMetalLayer {
        return self.layer as! CAMetalLayer
    }

    private weak var renderingContext: MetalVideoRenderingContext?
    private var renderingContextIndex: Int?

    private let blur: Bool

    private let vertexBuffer: MTLBuffer

    private var frameBufferRenderingState: FrameBufferRenderingState?
    private var blurInputTexture: MTLTexture?
    private var blurOutputTexture: MTLTexture?

    fileprivate private(set) var isEnabled: Bool = false
    fileprivate var needsRedraw: Bool = false
    fileprivate let numberOfUsedDrawables = Atomic<Int>(value: 0)

    private var onFirstFrameReceived: ((Float) -> Void)?
    private var onOrientationUpdated: ((PresentationCallVideoView.Orientation, CGFloat) -> Void)?
    private var onIsMirroredUpdated: ((Bool) -> Void)?

    private var didReportFirstFrame: Bool = false
    private var currentOrientation: PresentationCallVideoView.Orientation = .rotation0
    private var currentAspect: CGFloat = 1.0

    private var disposable: Disposable?

    init?(renderingContext: MetalVideoRenderingContext, input: Signal<OngoingGroupCallContext.VideoFrameData, NoError>, blur: Bool) {
        self.renderingContext = renderingContext
        self.blur = blur

        let vertexBufferArray = Array<Float>(repeating: 0, count: 16)
        guard let vertexBuffer = renderingContext.device.makeBuffer(bytes: vertexBufferArray, length: vertexBufferArray.count * MemoryLayout.size(ofValue: vertexBufferArray[0]), options: [.cpuCacheModeWriteCombined]) else {
            return nil
        }
        self.vertexBuffer = vertexBuffer

        super.init(frame: CGRect())

        self.renderingContextIndex = renderingContext.add(view: self)

        self.metalLayer.device = renderingContext.device
        self.metalLayer.pixelFormat = .bgra8Unorm
        self.metalLayer.framebufferOnly = true
        self.metalLayer.allowsNextDrawableTimeout = true

        self.disposable = input.start(next: { [weak self] videoFrameData in
            Queue.mainQueue().async {
                self?.addFrame(videoFrameData)
            }
        })
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    deinit {
        self.disposable?.dispose()
        if let renderingContext = self.renderingContext, let renderingContextIndex = self.renderingContextIndex {
            renderingContext.remove(index: renderingContextIndex)
        }
    }

    private func addFrame(_ videoFrameData: OngoingGroupCallContext.VideoFrameData) {
        let aspect = CGFloat(videoFrameData.width) / CGFloat(videoFrameData.height)
        var isAspectUpdated = false
        if self.currentAspect != aspect {
            self.currentAspect = aspect
            isAspectUpdated = true
        }

        let videoFrameOrientation = PresentationCallVideoView.Orientation(videoFrameData.orientation)
        var isOrientationUpdated = false
        if self.currentOrientation != videoFrameOrientation {
            self.currentOrientation = videoFrameOrientation
            isOrientationUpdated = true
        }

        if isAspectUpdated || isOrientationUpdated {
            self.onOrientationUpdated?(self.currentOrientation, self.currentAspect)
        }

        if !self.didReportFirstFrame {
            self.didReportFirstFrame = true
            self.onFirstFrameReceived?(Float(self.currentAspect))
        }

        if self.isEnabled, let renderingContext = self.renderingContext {
            switch videoFrameData.buffer {
            case let .native(buffer):
                let renderingState: NV12FrameBufferRenderingState
                if let current = self.frameBufferRenderingState as? NV12FrameBufferRenderingState {
                    renderingState = current
                } else {
                    renderingState = NV12FrameBufferRenderingState()
                    self.frameBufferRenderingState = renderingState
                }
                renderingState.updateTextureBuffers(renderingContext: renderingContext, frameBuffer: buffer)
                self.needsRedraw = true
            case let .i420(buffer):
                let renderingState: I420FrameBufferRenderingState
                if let current = self.frameBufferRenderingState as? I420FrameBufferRenderingState {
                    renderingState = current
                } else {
                    renderingState = I420FrameBufferRenderingState()
                    self.frameBufferRenderingState = renderingState
                }
                renderingState.updateTextureBuffers(renderingContext: renderingContext, frameBuffer: buffer)
                self.needsRedraw = true
            default:
                break
            }
        }
    }

    fileprivate func encode(commandBuffer: MTLCommandBuffer) -> MTLDrawable? {
        guard let renderingContext = self.renderingContext else {
            return nil
        }
        if self.numberOfUsedDrawables.with({ $0 }) >= 2 {
            return nil
        }
        guard let frameBufferRenderingState = self.frameBufferRenderingState else {
            return nil
        }

        guard let frameSize = frameBufferRenderingState.frameSize else {
            return nil
        }

        let drawableSize: CGSize
        if self.blur {
            drawableSize = frameSize.aspectFitted(CGSize(width: 64.0, height: 64.0))
        } else {
            drawableSize = frameSize
        }

        if self.blur {
            if let current = self.blurInputTexture, current.width == Int(drawableSize.width) && current.height == Int(drawableSize.height) {
            } else {
                let blurTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: Int(drawableSize.width), height: Int(drawableSize.height), mipmapped: false)
                blurTextureDescriptor.usage = [.shaderRead, .shaderWrite, .renderTarget]

                if let texture = renderingContext.device.makeTexture(descriptor: blurTextureDescriptor) {
                    self.blurInputTexture = texture
                }
            }

            if let current = self.blurOutputTexture, current.width == Int(drawableSize.width) && current.height == Int(drawableSize.height) {
            } else {
                let blurTextureDescriptor = MTLTextureDescriptor.texture2DDescriptor(pixelFormat: .bgra8Unorm, width: Int(drawableSize.width), height: Int(drawableSize.height), mipmapped: false)
                blurTextureDescriptor.usage = [.shaderRead, .shaderWrite]

                if let texture = renderingContext.device.makeTexture(descriptor: blurTextureDescriptor) {
                    self.blurOutputTexture = texture
                }
            }
        }

        if self.metalLayer.drawableSize != drawableSize {
            self.metalLayer.drawableSize = drawableSize

            getCubeVertexData(
                cropX: 0,
                cropY: 0,
                cropWidth: Int(drawableSize.width),
                cropHeight: Int(drawableSize.height),
                frameWidth: Int(drawableSize.width),
                frameHeight: Int(drawableSize.height),
                rotation: 0,
                buffer: self.vertexBuffer.contents().assumingMemoryBound(to: Float.self)
            )
        }

        guard let drawable = self.metalLayer.nextDrawable() else {
            return nil
        }

        if let blurInputTexture = self.blurInputTexture, let blurOutputTexture = self.blurOutputTexture {
            let renderPassDescriptor = MTLRenderPassDescriptor()
            renderPassDescriptor.colorAttachments[0].texture = blurInputTexture
            renderPassDescriptor.colorAttachments[0].loadAction = .clear
            renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(
                red: 0.0,
                green: 0.0,
                blue: 0.0,
                alpha: 1.0
            )

            guard let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
                return nil
            }

            let _ = frameBufferRenderingState.encode(renderingContext: renderingContext, vertexBuffer: self.vertexBuffer, renderEncoder: renderEncoder)

            renderEncoder.endEncoding()

            renderingContext.blurKernel.encode(commandBuffer: commandBuffer, sourceTexture: blurInputTexture, destinationTexture: blurOutputTexture)

            let blitPassDescriptor = MTLRenderPassDescriptor()
            blitPassDescriptor.colorAttachments[0].texture = drawable.texture
            blitPassDescriptor.colorAttachments[0].loadAction = .clear
            blitPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(
                red: 0.0,
                green: 0.0,
                blue: 0.0,
                alpha: 1.0
            )

            guard let blitEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: blitPassDescriptor) else {
                return nil
            }

            let _ = BlitRenderingState.encode(renderingContext: renderingContext, texture: blurOutputTexture, vertexBuffer: self.vertexBuffer, renderEncoder: blitEncoder)

            blitEncoder.endEncoding()
        } else {
            let renderPassDescriptor = MTLRenderPassDescriptor()
            renderPassDescriptor.colorAttachments[0].texture = drawable.texture
            renderPassDescriptor.colorAttachments[0].loadAction = .clear
            renderPassDescriptor.colorAttachments[0].clearColor = MTLClearColor(
                red: 0.0,
                green: 0.0,
                blue: 0.0,
                alpha: 1.0
            )

            guard let renderEncoder = commandBuffer.makeRenderCommandEncoder(descriptor: renderPassDescriptor) else {
                return nil
            }

            let _ = frameBufferRenderingState.encode(renderingContext: renderingContext, vertexBuffer: self.vertexBuffer, renderEncoder: renderEncoder)

            renderEncoder.endEncoding()
        }

        return drawable
    }

    func setOnFirstFrameReceived(_ f: @escaping (Float) -> Void) {
        self.onFirstFrameReceived = f
        self.didReportFirstFrame = false
    }

    func setOnOrientationUpdated(_ f: @escaping (PresentationCallVideoView.Orientation, CGFloat) -> Void) {
        self.onOrientationUpdated = f
    }

    func getOrientation() -> PresentationCallVideoView.Orientation {
        return self.currentOrientation
    }

    func getAspect() -> CGFloat {
        return self.currentAspect
    }

    func setOnIsMirroredUpdated(_ f: @escaping (Bool) -> Void) {
        self.onIsMirroredUpdated = f
    }

    func updateIsEnabled(_ isEnabled: Bool) {
        if self.isEnabled != isEnabled {
            self.isEnabled = isEnabled

            if self.isEnabled {
                self.needsRedraw = true
            }
        }
    }
}

@available(iOS 13.0, *)
class MetalVideoRenderingContext {
    private final class ViewReference {
        weak var view: MetalVideoRenderingView?

        init(view: MetalVideoRenderingView) {
            self.view = view
        }
    }

    fileprivate let device: MTLDevice
    fileprivate let textureCache: CVMetalTextureCache
    fileprivate let blurKernel: MPSImageGaussianBlur

    fileprivate let blitPipelineState: MTLRenderPipelineState
    fileprivate let nv12PipelineState: MTLRenderPipelineState
    fileprivate let i420PipelineState: MTLRenderPipelineState

    private let commandQueue: MTLCommandQueue

    private var displayLink: ConstantDisplayLinkAnimator?
    private var viewReferences = Bag<ViewReference>()

    init?() {
        guard let device = MTLCreateSystemDefaultDevice() else {
            return nil
        }
        self.device = device

        var textureCache: CVMetalTextureCache?
        let _ = CVMetalTextureCacheCreate(kCFAllocatorDefault, nil, self.device, nil, &textureCache)
        if let textureCache = textureCache {
            self.textureCache = textureCache
        } else {
            return nil
        }

        let mainBundle = Bundle(for: MetalVideoRenderingView.self)

        guard let path = mainBundle.path(forResource: "TelegramCallsUIBundle", ofType: "bundle") else {
            return nil
        }
        guard let bundle = Bundle(path: path) else {
            return nil
        }
        guard let defaultLibrary = try? self.device.makeDefaultLibrary(bundle: bundle) else {
            return nil
        }

        self.blurKernel = MPSImageGaussianBlur(device: self.device, sigma: 3.0)

        func makePipelineState(vertexProgram: String, fragmentProgram: String) -> MTLRenderPipelineState? {
            guard let loadedVertexProgram = defaultLibrary.makeFunction(name: vertexProgram) else {
                return nil
            }
            guard let loadedFragmentProgram = defaultLibrary.makeFunction(name: fragmentProgram) else {
                return nil
            }

            let pipelineStateDescriptor = MTLRenderPipelineDescriptor()
            pipelineStateDescriptor.vertexFunction = loadedVertexProgram
            pipelineStateDescriptor.fragmentFunction = loadedFragmentProgram
            pipelineStateDescriptor.colorAttachments[0].pixelFormat = .bgra8Unorm
            guard let pipelineState = try? device.makeRenderPipelineState(descriptor: pipelineStateDescriptor) else {
                return nil
            }

            return pipelineState
        }

        guard let blitPipelineState = makePipelineState(vertexProgram: "nv12VertexPassthrough", fragmentProgram: "blitFragmentColorConversion") else {
            return nil
        }
        self.blitPipelineState = blitPipelineState

        guard let nv12PipelineState = makePipelineState(vertexProgram: "nv12VertexPassthrough", fragmentProgram: "nv12FragmentColorConversion") else {
            return nil
        }
        self.nv12PipelineState = nv12PipelineState

        guard let i420PipelineState = makePipelineState(vertexProgram: "i420VertexPassthrough", fragmentProgram: "i420FragmentColorConversion") else {
            return nil
        }
        self.i420PipelineState = i420PipelineState

        guard let commandQueue = self.device.makeCommandQueue() else {
            return nil
        }
        self.commandQueue = commandQueue

        self.displayLink = ConstantDisplayLinkAnimator(update: { [weak self] in
            self?.redraw()
        })
        self.displayLink?.isPaused = false
    }

    func updateVisibility(isVisible: Bool) {
        self.displayLink?.isPaused = !isVisible
    }

    fileprivate func add(view: MetalVideoRenderingView) -> Int {
        return self.viewReferences.add(ViewReference(view: view))
    }

    fileprivate func remove(index: Int) {
        self.viewReferences.remove(index)
    }

    private func redraw() {
        guard let commandBuffer = self.commandQueue.makeCommandBuffer() else {
            return
        }

        var drawables: [MTLDrawable] = []
        var takenViewReferences: [ViewReference] = []

        for viewReference in self.viewReferences.copyItems() {
            guard let videoView = viewReference.view else {
                continue
            }

            if !videoView.needsRedraw {
                continue
            }
            videoView.needsRedraw = false

            if let drawable = videoView.encode(commandBuffer: commandBuffer) {
                let numberOfUsedDrawables = videoView.numberOfUsedDrawables
                let _ = numberOfUsedDrawables.modify {
                    return $0 + 1
                }
                takenViewReferences.append(viewReference)

                drawable.addPresentedHandler { _ in
                    let _ = numberOfUsedDrawables.modify {
                        return max(0, $0 - 1)
                    }
                }

                drawables.append(drawable)
            }
        }

        if drawables.isEmpty {
            return
        }

        if drawables.count > 10 {
            print("Schedule \(drawables.count) drawables")
        }

        commandBuffer.addScheduledHandler { _ in
            for drawable in drawables {
                drawable.present()
            }
        }

        commandBuffer.commit()
    }
}
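In the blur path above, each frame is rendered into a small offscreen texture (aspect-fitted into 64x64), blurred with MPSImageGaussianBlur(sigma: 3.0), and then blitted to the drawable. A minimal, self-contained sketch of that MPS call in isolation; the device, queue and textures here stand in for the ones the rendering context owns, and the synchronous wait is only for illustration:

import Metal
import MetalPerformanceShaders

// Sketch: blur one BGRA texture into another of the same size.
// `destination` must have been created with .shaderWrite usage.
func gaussianBlur(source: MTLTexture, destination: MTLTexture, device: MTLDevice, queue: MTLCommandQueue) {
    guard let commandBuffer = queue.makeCommandBuffer() else {
        return
    }
    let kernel = MPSImageGaussianBlur(device: device, sigma: 3.0) // same sigma the commit uses
    kernel.encode(commandBuffer: commandBuffer, sourceTexture: source, destinationTexture: destination)
    commandBuffer.commit()
    commandBuffer.waitUntilCompleted() // synchronous only for this example
}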
@@ -2595,6 +2595,15 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
             let videoCapturer = OngoingCallVideoCapturer()
             self.videoCapturer = videoCapturer
         }
+
+        if let videoCapturer = self.videoCapturer {
+            self.requestVideo(capturer: videoCapturer)
+        }
+    }
+
+    func requestVideo(capturer: OngoingCallVideoCapturer) {
+        self.videoCapturer = capturer
+
         self.hasVideo = true
         if let videoCapturer = self.videoCapturer {
             self.genericCallContext?.requestVideo(videoCapturer)
@@ -3190,6 +3199,10 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
         })
     }
+
+    func video(endpointId: String) -> Signal<OngoingGroupCallContext.VideoFrameData, NoError>? {
+        return self.genericCallContext?.video(endpointId: endpointId)
+    }
 
     public func loadMoreMembers(token: String) {
         self.participantsContext?.loadMore(token: token)
     }
@@ -0,0 +1,144 @@ (new file: SampleBufferVideoRenderingView)
import Foundation
import UIKit
import AsyncDisplayKit
import Display
import SwiftSignalKit
import AccountContext
import TelegramVoip
import AVFoundation

private func sampleBufferFromPixelBuffer(pixelBuffer: CVPixelBuffer) -> CMSampleBuffer? {
    var maybeFormat: CMVideoFormatDescription?
    let status = CMVideoFormatDescriptionCreateForImageBuffer(allocator: kCFAllocatorDefault, imageBuffer: pixelBuffer, formatDescriptionOut: &maybeFormat)
    if status != noErr {
        return nil
    }
    guard let format = maybeFormat else {
        return nil
    }

    var timingInfo = CMSampleTimingInfo(
        duration: CMTimeMake(value: 1, timescale: 30),
        presentationTimeStamp: CMTimeMake(value: 0, timescale: 30),
        decodeTimeStamp: CMTimeMake(value: 0, timescale: 30)
    )

    var maybeSampleBuffer: CMSampleBuffer?
    let bufferStatus = CMSampleBufferCreateReadyWithImageBuffer(allocator: kCFAllocatorDefault, imageBuffer: pixelBuffer, formatDescription: format, sampleTiming: &timingInfo, sampleBufferOut: &maybeSampleBuffer)

    if (bufferStatus != noErr) {
        return nil
    }
    guard let sampleBuffer = maybeSampleBuffer else {
        return nil
    }

    let attachments: NSArray = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer, createIfNecessary: true)! as NSArray
    let dict: NSMutableDictionary = attachments[0] as! NSMutableDictionary
    dict[kCMSampleAttachmentKey_DisplayImmediately as NSString] = true as NSNumber

    return sampleBuffer
}

final class SampleBufferVideoRenderingView: UIView, VideoRenderingView {
    static override var layerClass: AnyClass {
        return AVSampleBufferDisplayLayer.self
    }

    private var sampleBufferLayer: AVSampleBufferDisplayLayer {
        return self.layer as! AVSampleBufferDisplayLayer
    }

    private var isEnabled: Bool = false

    private var onFirstFrameReceived: ((Float) -> Void)?
    private var onOrientationUpdated: ((PresentationCallVideoView.Orientation, CGFloat) -> Void)?
    private var onIsMirroredUpdated: ((Bool) -> Void)?

    private var didReportFirstFrame: Bool = false
    private var currentOrientation: PresentationCallVideoView.Orientation = .rotation0
    private var currentAspect: CGFloat = 1.0

    private var disposable: Disposable?

    init(input: Signal<OngoingGroupCallContext.VideoFrameData, NoError>) {
        super.init(frame: CGRect())

        self.disposable = input.start(next: { [weak self] videoFrameData in
            Queue.mainQueue().async {
                self?.addFrame(videoFrameData)
            }
        })

        self.sampleBufferLayer.videoGravity = .resize
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    deinit {
        self.disposable?.dispose()
    }

    private func addFrame(_ videoFrameData: OngoingGroupCallContext.VideoFrameData) {
        let aspect = CGFloat(videoFrameData.width) / CGFloat(videoFrameData.height)
        var isAspectUpdated = false
        if self.currentAspect != aspect {
            self.currentAspect = aspect
            isAspectUpdated = true
        }

        let videoFrameOrientation = PresentationCallVideoView.Orientation(videoFrameData.orientation)
        var isOrientationUpdated = false
        if self.currentOrientation != videoFrameOrientation {
            self.currentOrientation = videoFrameOrientation
            isOrientationUpdated = true
        }

        if isAspectUpdated || isOrientationUpdated {
            self.onOrientationUpdated?(self.currentOrientation, self.currentAspect)
        }

        if !self.didReportFirstFrame {
            self.didReportFirstFrame = true
            self.onFirstFrameReceived?(Float(self.currentAspect))
        }

        if self.isEnabled {
            switch videoFrameData.buffer {
            case let .native(buffer):
                if let sampleBuffer = sampleBufferFromPixelBuffer(pixelBuffer: buffer.pixelBuffer) {
                    self.sampleBufferLayer.enqueue(sampleBuffer)
                }
            default:
                break
            }
        }
    }

    func setOnFirstFrameReceived(_ f: @escaping (Float) -> Void) {
        self.onFirstFrameReceived = f
        self.didReportFirstFrame = false
    }

    func setOnOrientationUpdated(_ f: @escaping (PresentationCallVideoView.Orientation, CGFloat) -> Void) {
        self.onOrientationUpdated = f
    }

    func getOrientation() -> PresentationCallVideoView.Orientation {
        return self.currentOrientation
    }

    func getAspect() -> CGFloat {
        return self.currentAspect
    }

    func setOnIsMirroredUpdated(_ f: @escaping (Bool) -> Void) {
        self.onIsMirroredUpdated = f
    }

    func updateIsEnabled(_ isEnabled: Bool) {
        self.isEnabled = isEnabled
    }
}
@@ -0,0 +1,61 @@ (new file: VideoRenderingView / VideoRenderingContext)
import Foundation
import UIKit
import AsyncDisplayKit
import Display
import SwiftSignalKit
import AccountContext
import TelegramVoip
import AVFoundation

protocol VideoRenderingView: UIView {
    func setOnFirstFrameReceived(_ f: @escaping (Float) -> Void)
    func setOnOrientationUpdated(_ f: @escaping (PresentationCallVideoView.Orientation, CGFloat) -> Void)
    func getOrientation() -> PresentationCallVideoView.Orientation
    func getAspect() -> CGFloat
    func setOnIsMirroredUpdated(_ f: @escaping (Bool) -> Void)
    func updateIsEnabled(_ isEnabled: Bool)
}

class VideoRenderingContext {
    private var metalContextImpl: Any?

    @available(iOS 13.0, *)
    var metalContext: MetalVideoRenderingContext {
        if let value = self.metalContextImpl as? MetalVideoRenderingContext {
            return value
        } else {
            let value = MetalVideoRenderingContext()!
            self.metalContextImpl = value
            return value
        }
    }

    func makeView(input: Signal<OngoingGroupCallContext.VideoFrameData, NoError>, blur: Bool) -> VideoRenderingView? {
        if #available(iOS 13.0, *) {
            return MetalVideoRenderingView(renderingContext: self.metalContext, input: input, blur: blur)
        } else {
            return SampleBufferVideoRenderingView(input: input)
        }
    }

    func updateVisibility(isVisible: Bool) {
        if #available(iOS 13.0, *) {
            self.metalContext.updateVisibility(isVisible: isVisible)
        }
    }
}

extension PresentationCallVideoView.Orientation {
    init(_ orientation: OngoingCallVideoOrientation) {
        switch orientation {
        case .rotation0:
            self = .rotation0
        case .rotation90:
            self = .rotation90
        case .rotation180:
            self = .rotation180
        case .rotation270:
            self = .rotation270
        }
    }
}
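A sketch of how this abstraction is used by the call UI (the concrete wiring appears in the VoiceChatController changes below); renderingContext, callImpl and endpointId are assumed to already exist at the call site:

// Hedged sketch; assumes `renderingContext: VideoRenderingContext`,
// `callImpl: PresentationGroupCallImpl`, and `endpointId: String`.
if let input = callImpl.video(endpointId: endpointId),
   let videoView = renderingContext.makeView(input: input, blur: false) {
    // The optional blurred clone feeds the backdrop behind the main video.
    let backdropVideoView = renderingContext.makeView(input: input, blur: true)
    let videoNode = GroupVideoNode(videoView: videoView, backdropVideoView: backdropVideoView)
    // Rendering work is paused or resumed globally via
    // renderingContext.updateVisibility(isVisible:); videoNode is then laid out as before.
}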
@@ -918,6 +918,7 @@ public final class VoiceChatController: ViewController {
         private var requestedVideoSources = Set<String>()
         private var requestedVideoChannels: [PresentationGroupCallRequestedVideo] = []
 
+        private var videoRenderingContext: VideoRenderingContext
         private var videoNodes: [String: GroupVideoNode] = [:]
         private var wideVideoNodes = Set<String>()
         private var videoOrder: [String] = []
@@ -972,6 +973,8 @@ public final class VoiceChatController: ViewController {
             self.context = call.accountContext
             self.call = call
 
+            self.videoRenderingContext = VideoRenderingContext()
+
             self.isScheduling = call.schedulePending
 
             let presentationData = sharedContext.currentPresentationData.with { $0 }
@@ -2332,13 +2335,19 @@ public final class VoiceChatController: ViewController {
                         }))
                     }
                 } else {
-                    strongSelf.call.makeIncomingVideoView(endpointId: endpointId, requestClone: GroupVideoNode.useBlurTransparency, completion: { videoView, backdropVideoView in
+                    if let input = (strongSelf.call as! PresentationGroupCallImpl).video(endpointId: endpointId) {
+                        if let videoView = strongSelf.videoRenderingContext.makeView(input: input, blur: false) {
+                            completion(GroupVideoNode(videoView: videoView, backdropVideoView: strongSelf.videoRenderingContext.makeView(input: input, blur: true)))
+                        }
+                    }
+
+                    /*strongSelf.call.makeIncomingVideoView(endpointId: endpointId, requestClone: GroupVideoNode.useBlurTransparency, completion: { videoView, backdropVideoView in
                         if let videoView = videoView {
                             completion(GroupVideoNode(videoView: videoView, backdropVideoView: backdropVideoView))
                         } else {
                             completion(nil)
                         }
-                    })
+                    })*/
                 }
             }
         }
@@ -3482,7 +3491,29 @@ public final class VoiceChatController: ViewController {
                     guard let strongSelf = self, ready else {
                         return
                     }
-                    strongSelf.call.makeOutgoingVideoView(requestClone: false, completion: { [weak self] view, _ in
+                    let videoCapturer = OngoingCallVideoCapturer()
+                    let input = videoCapturer.video()
+                    if let videoView = strongSelf.videoRenderingContext.makeView(input: input, blur: false) {
+                        let cameraNode = GroupVideoNode(videoView: videoView, backdropVideoView: nil)
+                        let controller = VoiceChatCameraPreviewController(context: strongSelf.context, cameraNode: cameraNode, shareCamera: { [weak self] _, unmuted in
+                            if let strongSelf = self {
+                                strongSelf.call.setIsMuted(action: unmuted ? .unmuted : .muted(isPushToTalkActive: false))
+                                (strongSelf.call as! PresentationGroupCallImpl).requestVideo(capturer: videoCapturer)
+
+                                if let (layout, navigationHeight) = strongSelf.validLayout {
+                                    strongSelf.animatingButtonsSwap = true
+                                    strongSelf.containerLayoutUpdated(layout, navigationHeight: navigationHeight, transition: .animated(duration: 0.4, curve: .spring))
+                                }
+                            }
+                        }, switchCamera: { [weak self] in
+                            Queue.mainQueue().after(0.1) {
+                                self?.call.switchVideoCamera()
+                            }
+                        })
+                        strongSelf.controller?.present(controller, in: .window(.root))
+                    }
+
+                    /*strongSelf.call.makeOutgoingVideoView(requestClone: false, completion: { [weak self] view, _ in
                         guard let strongSelf = self, let view = view else {
                             return
                         }
@@ -3503,7 +3534,7 @@ public final class VoiceChatController: ViewController {
                         }
                     })
                     strongSelf.controller?.present(controller, in: .window(.root))
-                    })
+                    })*/
                 })
             }
         }
@@ -4547,12 +4578,18 @@ public final class VoiceChatController: ViewController {
 
         private var appIsActive = true {
             didSet {
+                if self.appIsActive != oldValue {
                     self.updateVisibility()
+                    self.updateRequestedVideoChannels()
+                }
             }
         }
         private var visibility = false {
             didSet {
+                if self.visibility != oldValue {
                     self.updateVisibility()
+                    self.updateRequestedVideoChannels()
+                }
             }
         }
 
@@ -4574,6 +4611,8 @@ public final class VoiceChatController: ViewController {
                     itemNode.gridVisibility = visible
                 }
             }
+
+            self.videoRenderingContext.updateVisibility(isVisible: visible)
         }
 
         func animateIn() {
@@ -5201,7 +5240,63 @@ public final class VoiceChatController: ViewController {
 
             if !self.requestedVideoSources.contains(channel.endpointId) {
                 self.requestedVideoSources.insert(channel.endpointId)
-                self.call.makeIncomingVideoView(endpointId: channel.endpointId, requestClone: GroupVideoNode.useBlurTransparency, completion: { [weak self] videoView, backdropVideoView in
+
+                let input = (self.call as! PresentationGroupCallImpl).video(endpointId: channel.endpointId)
+                if let input = input, let videoView = self.videoRenderingContext.makeView(input: input, blur: false) {
+                    let videoNode = GroupVideoNode(videoView: videoView, backdropVideoView: self.videoRenderingContext.makeView(input: input, blur: true))
+
+                    self.readyVideoDisposables.set((combineLatest(videoNode.ready, .single(false) |> then(.single(true) |> delay(10.0, queue: Queue.mainQueue())))
+                    |> deliverOnMainQueue
+                    ).start(next: { [weak self, weak videoNode] ready, timeouted in
+                        if let strongSelf = self, let videoNode = videoNode {
+                            Queue.mainQueue().after(0.1) {
+                                if timeouted && !ready {
+                                    strongSelf.timeoutedEndpointIds.insert(channel.endpointId)
+                                    strongSelf.readyVideoEndpointIds.remove(channel.endpointId)
+                                    strongSelf.readyVideoEndpointIdsPromise.set(strongSelf.readyVideoEndpointIds)
+                                    strongSelf.wideVideoNodes.remove(channel.endpointId)
+
+                                    strongSelf.updateMembers()
+                                } else if ready {
+                                    strongSelf.readyVideoEndpointIds.insert(channel.endpointId)
+                                    strongSelf.readyVideoEndpointIdsPromise.set(strongSelf.readyVideoEndpointIds)
+                                    strongSelf.timeoutedEndpointIds.remove(channel.endpointId)
+                                    if videoNode.aspectRatio <= 0.77 {
+                                        strongSelf.wideVideoNodes.insert(channel.endpointId)
+                                    } else {
+                                        strongSelf.wideVideoNodes.remove(channel.endpointId)
+                                    }
+                                    strongSelf.updateMembers()
+
+                                    if let (layout, _) = strongSelf.validLayout, case .compact = layout.metrics.widthClass {
+                                        if let interaction = strongSelf.itemInteraction {
+                                            loop: for i in 0 ..< strongSelf.currentFullscreenEntries.count {
+                                                let entry = strongSelf.currentFullscreenEntries[i]
+                                                switch entry {
+                                                case let .peer(peerEntry, _):
+                                                    if peerEntry.effectiveVideoEndpointId == channel.endpointId {
+                                                        let presentationData = strongSelf.presentationData.withUpdated(theme: strongSelf.darkTheme)
+                                                        strongSelf.fullscreenListNode.transaction(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [ListViewUpdateItem(index: i, previousIndex: i, item: entry.fullscreenItem(context: strongSelf.context, presentationData: presentationData, interaction: interaction), directionHint: nil)], options: [.Synchronous], updateOpaqueState: nil)
+                                                        break loop
+                                                    }
+                                                default:
+                                                    break
+                                                }
+                                            }
+                                        }
+                                    }
+                                }
+                            }
+                        }
+                    }), forKey: channel.endpointId)
+                    self.videoNodes[channel.endpointId] = videoNode
+
+                    if let _ = self.validLayout {
+                        self.updateMembers()
+                    }
+                }
+
+                /*self.call.makeIncomingVideoView(endpointId: channel.endpointId, requestClone: GroupVideoNode.useBlurTransparency, completion: { [weak self] videoView, backdropVideoView in
                     Queue.mainQueue().async {
                         guard let strongSelf = self, let videoView = videoView else {
                             return
@@ -5258,7 +5353,7 @@ public final class VoiceChatController: ViewController {
                         strongSelf.updateMembers()
                     }
                 }
-                })
+                })*/
             }
         }
 
@@ -5366,7 +5461,9 @@ public final class VoiceChatController: ViewController {
 
         private func updateRequestedVideoChannels() {
            Queue.mainQueue().after(0.3) {
-                self.call.setRequestedVideoList(items: self.requestedVideoChannels)
+                let enableVideo = self.appIsActive && self.visibility
+
+                self.call.setRequestedVideoList(items: enableVideo ? self.requestedVideoChannels : [])
                 self.filterRequestedVideoChannels(channels: self.requestedVideoChannels)
             }
         }
@@ -2354,7 +2354,9 @@ private final class PeerInfoScreenNode: ViewControllerTracingNode, UIScrollViewD
                     self?.updateProfileVideo(image, asset: asset, adjustments: adjustments)
                 }
                 galleryController.removedEntry = { [weak self] entry in
-                    let _ = self?.headerNode.avatarListNode.listContainerNode.deleteItem(PeerInfoAvatarListItem(entry: entry))
+                    if let item = PeerInfoAvatarListItem(entry: entry) {
+                        let _ = self?.headerNode.avatarListNode.listContainerNode.deleteItem(item)
+                    }
                 }
                 strongSelf.hiddenAvatarRepresentationDisposable.set((galleryController.hiddenMedia |> deliverOnMainQueue).start(next: { entry in
                     self?.headerNode.updateAvatarIsHidden(entry: entry)
@@ -219,6 +219,115 @@ public final class OngoingGroupCallContext {
         }
     }
 
+    public final class VideoFrameData {
+        public final class NativeBuffer {
+            public let pixelBuffer: CVPixelBuffer
+
+            init(pixelBuffer: CVPixelBuffer) {
+                self.pixelBuffer = pixelBuffer
+            }
+        }
+
+        public final class NV12Buffer {
+            private let wrapped: CallVideoFrameNV12Buffer
+
+            public var width: Int {
+                return Int(self.wrapped.width)
+            }
+
+            public var height: Int {
+                return Int(self.wrapped.height)
+            }
+
+            public var y: Data {
+                return self.wrapped.y
+            }
+
+            public var strideY: Int {
+                return Int(self.wrapped.strideY)
+            }
+
+            public var uv: Data {
+                return self.wrapped.uv
+            }
+
+            public var strideUV: Int {
+                return Int(self.wrapped.strideUV)
+            }
+
+            init(wrapped: CallVideoFrameNV12Buffer) {
+                self.wrapped = wrapped
+            }
+        }
+
+        public final class I420Buffer {
+            private let wrapped: CallVideoFrameI420Buffer
+
+            public var width: Int {
+                return Int(self.wrapped.width)
+            }
+
+            public var height: Int {
+                return Int(self.wrapped.height)
+            }
+
+            public var y: Data {
+                return self.wrapped.y
+            }
+
+            public var strideY: Int {
+                return Int(self.wrapped.strideY)
+            }
+
+            public var u: Data {
+                return self.wrapped.u
+            }
+
+            public var strideU: Int {
+                return Int(self.wrapped.strideU)
+            }
+
+            public var v: Data {
+                return self.wrapped.v
+            }
+
+            public var strideV: Int {
+                return Int(self.wrapped.strideV)
+            }
+
+            init(wrapped: CallVideoFrameI420Buffer) {
+                self.wrapped = wrapped
+            }
+        }
+
+        public enum Buffer {
+            case native(NativeBuffer)
+            case nv12(NV12Buffer)
+            case i420(I420Buffer)
+        }
+
+        public let buffer: Buffer
+        public let width: Int
+        public let height: Int
+        public let orientation: OngoingCallVideoOrientation
+
+        init(frameData: CallVideoFrameData) {
+            if let nativeBuffer = frameData.buffer as? CallVideoFrameNativePixelBuffer {
+                self.buffer = .native(NativeBuffer(pixelBuffer: nativeBuffer.pixelBuffer))
+            } else if let nv12Buffer = frameData.buffer as? CallVideoFrameNV12Buffer {
+                self.buffer = .nv12(NV12Buffer(wrapped: nv12Buffer))
+            } else if let i420Buffer = frameData.buffer as? CallVideoFrameI420Buffer {
+                self.buffer = .i420(I420Buffer(wrapped: i420Buffer))
+            } else {
+                preconditionFailure()
+            }
+
+            self.width = Int(frameData.width)
+            self.height = Int(frameData.height)
+            self.orientation = OngoingCallVideoOrientation(frameData.orientation)
+        }
+    }
+
     private final class Impl {
         let queue: Queue
         let context: GroupCallThreadLocalContext
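A hypothetical consumer-side sketch of the VideoFrameData.Buffer cases defined above (the helper function and its name are illustrative, not part of the commit; assumes CoreVideo is imported):

    import CoreVideo

    // Hypothetical helper: summarize which plane layout a frame arrived in.
    // VideoFrameData and its Buffer cases come from the hunk above; everything else is illustrative.
    func describe(_ frame: OngoingGroupCallContext.VideoFrameData) -> String {
        switch frame.buffer {
        case let .native(native):
            // Already a CVPixelBuffer, so it can be bound to a GPU texture directly.
            return "native \(CVPixelBufferGetWidth(native.pixelBuffer))x\(CVPixelBufferGetHeight(native.pixelBuffer))"
        case let .nv12(nv12):
            // One luma plane plus one interleaved chroma plane.
            return "nv12 \(nv12.width)x\(nv12.height), strides y=\(nv12.strideY) uv=\(nv12.strideUV)"
        case let .i420(i420):
            // Three separate planes (Y, U, V).
            return "i420 \(i420.width)x\(i420.height), strides y=\(i420.strideY) u=\(i420.strideU) v=\(i420.strideV)"
        }
    }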
@@ -615,6 +724,27 @@ public final class OngoingGroupCallContext {
             })
         }
 
+        func video(endpointId: String) -> Signal<OngoingGroupCallContext.VideoFrameData, NoError> {
+            let queue = self.queue
+            return Signal { [weak self] subscriber in
+                let disposable = MetaDisposable()
+
+                queue.async {
+                    guard let strongSelf = self else {
+                        return
+                    }
+                    let innerDisposable = strongSelf.context.addVideoOutput(withEndpointId: endpointId) { videoFrameData in
+                        subscriber.putNext(OngoingGroupCallContext.VideoFrameData(frameData: videoFrameData))
+                    }
+                    disposable.set(ActionDisposable {
+                        innerDisposable.dispose()
+                    })
+                }
+
+                return disposable
+            }
+        }
+
         func addExternalAudioData(data: Data) {
             self.context.addExternalAudioData(data)
         }
@@ -778,6 +908,18 @@ public final class OngoingGroupCallContext {
         }
     }
 
+    public func video(endpointId: String) -> Signal<OngoingGroupCallContext.VideoFrameData, NoError> {
+        return Signal { subscriber in
+            let disposable = MetaDisposable()
+            self.impl.with { impl in
+                disposable.set(impl.video(endpointId: endpointId).start(next: { value in
+                    subscriber.putNext(value)
+                }))
+            }
+            return disposable
+        }
+    }
+
     public func addExternalAudioData(data: Data) {
         self.impl.with { impl in
             impl.addExternalAudioData(data: data)
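A hypothetical subscription sketch for the video(endpointId:) signal added above (groupCallContext and endpointId are placeholder names):

    // Illustrative only: attach to one participant's decoded frames and log their size.
    let frameDisposable = groupCallContext.video(endpointId: endpointId).start(next: { frame in
        print("frame \(frame.width)x\(frame.height), orientation \(frame.orientation)")
    })
    // Disposing tears down the underlying native sink registered via addVideoOutputWithEndpointId.
    frameDisposable.dispose()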
@@ -451,6 +451,27 @@ public final class OngoingCallVideoCapturer {
         }
         self.impl.submitPixelBuffer(pixelBuffer, rotation: videoRotation.orientation)
     }
+
+    public func video() -> Signal<OngoingGroupCallContext.VideoFrameData, NoError> {
+        let queue = Queue.mainQueue()
+        return Signal { [weak self] subscriber in
+            let disposable = MetaDisposable()
+
+            queue.async {
+                guard let strongSelf = self else {
+                    return
+                }
+                let innerDisposable = strongSelf.impl.addVideoOutput { videoFrameData in
+                    subscriber.putNext(OngoingGroupCallContext.VideoFrameData(frameData: videoFrameData))
+                }
+                disposable.set(ActionDisposable {
+                    innerDisposable.dispose()
+                })
+            }
+
+            return disposable
+        }
+    }
 }
 
 extension OngoingCallThreadLocalContextWebrtc: OngoingCallThreadLocalContextProtocol {
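A hypothetical self-preview sketch combining the capturer's new video() signal with the rendering context introduced earlier in this diff (renderingContext is a placeholder):

    // Illustrative self-preview wiring: local camera frames flow through the same
    // VideoRenderingContext.makeView path as incoming participant video.
    let capturer = OngoingCallVideoCapturer()
    if let selfView = renderingContext.makeView(input: capturer.video(), blur: false) {
        let previewNode = GroupVideoNode(videoView: selfView, backdropVideoView: nil)
        // Present previewNode, e.g. inside VoiceChatCameraPreviewController (see the earlier hunk).
        _ = previewNode
    }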
@@ -110,6 +110,60 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
 #endif
 @end
 
+@interface GroupCallDisposable : NSObject
+
+- (void)dispose;
+
+@end
+
+@protocol CallVideoFrameBuffer
+
+@end
+
+@interface CallVideoFrameNativePixelBuffer : NSObject<CallVideoFrameBuffer>
+
+@property (nonatomic, readonly) CVPixelBufferRef _Nonnull pixelBuffer;
+
+@end
+
+@interface CallVideoFrameNV12Buffer : NSObject<CallVideoFrameBuffer>
+
+@property (nonatomic, readonly) int width;
+@property (nonatomic, readonly) int height;
+
+@property (nonatomic, strong, readonly) NSData * _Nonnull y;
+@property (nonatomic, readonly) int strideY;
+
+@property (nonatomic, strong, readonly) NSData * _Nonnull uv;
+@property (nonatomic, readonly) int strideUV;
+
+@end
+
+@interface CallVideoFrameI420Buffer : NSObject<CallVideoFrameBuffer>
+
+@property (nonatomic, readonly) int width;
+@property (nonatomic, readonly) int height;
+
+@property (nonatomic, strong, readonly) NSData * _Nonnull y;
+@property (nonatomic, readonly) int strideY;
+
+@property (nonatomic, strong, readonly) NSData * _Nonnull u;
+@property (nonatomic, readonly) int strideU;
+
+@property (nonatomic, strong, readonly) NSData * _Nonnull v;
+@property (nonatomic, readonly) int strideV;
+
+@end
+
+@interface CallVideoFrameData : NSObject
+
+@property (nonatomic, strong, readonly) id<CallVideoFrameBuffer> _Nonnull buffer;
+@property (nonatomic, readonly) int width;
+@property (nonatomic, readonly) int height;
+@property (nonatomic, readonly) OngoingCallVideoOrientationWebrtc orientation;
+
+@end
+
 @interface OngoingCallThreadLocalContextVideoCapturer : NSObject
 
 - (instancetype _Nonnull)initWithDeviceId:(NSString * _Nonnull)deviceId keepLandscape:(bool)keepLandscape;
@@ -131,6 +185,8 @@ typedef NS_ENUM(int32_t, OngoingCallDataSavingWebrtc) {
 - (void)submitPixelBuffer:(CVPixelBufferRef _Nonnull)pixelBuffer rotation:(OngoingCallVideoOrientationWebrtc)rotation;
 #endif
 
+- (GroupCallDisposable * _Nonnull)addVideoOutput:(void (^_Nonnull)(CallVideoFrameData * _Nonnull))sink;
+
 @end
 
 @interface OngoingCallThreadLocalContextWebrtc : NSObject
@@ -291,6 +347,7 @@ typedef NS_ENUM(int32_t, OngoingGroupCallRequestedVideoQuality) {
 - (void)switchAudioOutput:(NSString * _Nonnull)deviceId;
 - (void)switchAudioInput:(NSString * _Nonnull)deviceId;
 - (void)makeIncomingVideoViewWithEndpointId:(NSString * _Nonnull)endpointId requestClone:(bool)requestClone completion:(void (^_Nonnull)(UIView<OngoingCallThreadLocalContextWebrtcVideoView> * _Nullable, UIView<OngoingCallThreadLocalContextWebrtcVideoView> * _Nullable))completion;
+- (GroupCallDisposable * _Nonnull)addVideoOutputWithEndpointId:(NSString * _Nonnull)endpointId sink:(void (^_Nonnull)(CallVideoFrameData * _Nonnull))sink;
 
 - (void)addExternalAudioData:(NSData * _Nonnull)data;
 
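These sink methods return a plain GroupCallDisposable rather than a SwiftSignalKit Disposable, so callers hold on to it and call dispose() explicitly. A hypothetical Swift-side wrapper sketch (the wrapper type itself is illustrative; GroupCallThreadLocalContext, CallVideoFrameData and the addVideoOutput(withEndpointId:sink:) method are declared in the header above):

    // Illustrative wrapper: keep the Objective-C disposable alive for the lifetime of the sink
    // and tear the native video output down deterministically.
    final class VideoSinkHandle {
        private let disposable: GroupCallDisposable

        init(context: GroupCallThreadLocalContext, endpointId: String, onFrame: @escaping (CallVideoFrameData) -> Void) {
            self.disposable = context.addVideoOutput(withEndpointId: endpointId, sink: onFrame)
        }

        deinit {
            self.disposable.dispose()
        }
    }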
@@ -32,6 +32,9 @@
 
 #import "VideoCaptureInterfaceImpl.h"
 
+#include "sdk/objc/native/src/objc_frame_buffer.h"
+#import "components/video_frame_buffer/RTCCVPixelBuffer.h"
+
 @implementation OngoingCallConnectionDescriptionWebrtc
 
 - (instancetype _Nonnull)initWithConnectionId:(int64_t)connectionId hasStun:(bool)hasStun hasTurn:(bool)hasTurn ip:(NSString * _Nonnull)ip port:(int32_t)port username:(NSString * _Nonnull)username password:(NSString * _Nonnull)password {
@@ -221,10 +224,216 @@
 
 @end
 
+@interface GroupCallDisposable () {
+    dispatch_block_t _block;
+}
+
+@end
+
+@implementation GroupCallDisposable
+
+- (instancetype)initWithBlock:(dispatch_block_t _Nonnull)block {
+    self = [super init];
+    if (self != nil) {
+        _block = [block copy];
+    }
+    return self;
+}
+
+- (void)dispose {
+    if (_block) {
+        _block();
+    }
+}
+
+@end
+
+@implementation CallVideoFrameNativePixelBuffer
+
+- (instancetype)initWithPixelBuffer:(CVPixelBufferRef)pixelBuffer {
+    self = [super init];
+    if (self != nil) {
+        assert(pixelBuffer != nil);
+
+        _pixelBuffer = CVPixelBufferRetain(pixelBuffer);
+    }
+    return self;
+}
+
+- (void)dealloc {
+    CVPixelBufferRelease(_pixelBuffer);
+}
+
+@end
+
+@implementation CallVideoFrameNV12Buffer
+
+- (instancetype)initWithBuffer:(rtc::scoped_refptr<webrtc::NV12BufferInterface>)nv12Buffer {
+    self = [super init];
+    if (self != nil) {
+        _width = nv12Buffer->width();
+        _height = nv12Buffer->height();
+
+        _strideY = nv12Buffer->StrideY();
+        _strideUV = nv12Buffer->StrideUV();
+
+        _y = [[NSData alloc] initWithBytesNoCopy:(void *)nv12Buffer->DataY() length:nv12Buffer->StrideY() * _height deallocator:^(__unused void * _Nonnull bytes, __unused NSUInteger length) {
+            nv12Buffer.get();
+        }];
+
+        _uv = [[NSData alloc] initWithBytesNoCopy:(void *)nv12Buffer->DataUV() length:nv12Buffer->StrideUV() * _height deallocator:^(__unused void * _Nonnull bytes, __unused NSUInteger length) {
+            nv12Buffer.get();
+        }];
+    }
+    return self;
+}
+
+@end
+
+@implementation CallVideoFrameI420Buffer
+
+- (instancetype)initWithBuffer:(rtc::scoped_refptr<webrtc::I420BufferInterface>)i420Buffer {
+    self = [super init];
+    if (self != nil) {
+        _width = i420Buffer->width();
+        _height = i420Buffer->height();
+
+        _strideY = i420Buffer->StrideY();
+        _strideU = i420Buffer->StrideU();
+        _strideV = i420Buffer->StrideV();
+
+        _y = [[NSData alloc] initWithBytesNoCopy:(void *)i420Buffer->DataY() length:i420Buffer->StrideY() * _height deallocator:^(__unused void * _Nonnull bytes, __unused NSUInteger length) {
+            i420Buffer.get();
+        }];
+
+        _u = [[NSData alloc] initWithBytesNoCopy:(void *)i420Buffer->DataU() length:i420Buffer->StrideU() * _height deallocator:^(__unused void * _Nonnull bytes, __unused NSUInteger length) {
+            i420Buffer.get();
+        }];
+
+        _v = [[NSData alloc] initWithBytesNoCopy:(void *)i420Buffer->DataV() length:i420Buffer->StrideV() * _height deallocator:^(__unused void * _Nonnull bytes, __unused NSUInteger length) {
+            i420Buffer.get();
+        }];
+    }
+    return self;
+}
+
+@end
+
+@interface CallVideoFrameData () {
+}
+
+@end
+
+@implementation CallVideoFrameData
+
+- (instancetype)initWithBuffer:(id<CallVideoFrameBuffer>)buffer frame:(webrtc::VideoFrame const &)frame {
+    self = [super init];
+    if (self != nil) {
+        _buffer = buffer;
+
+        _width = frame.width();
+        _height = frame.height();
+
+        switch (frame.rotation()) {
+            case webrtc::kVideoRotation_0: {
+                _orientation = OngoingCallVideoOrientation0;
+                break;
+            }
+            case webrtc::kVideoRotation_90: {
+                _orientation = OngoingCallVideoOrientation90;
+                break;
+            }
+            case webrtc::kVideoRotation_180: {
+                _orientation = OngoingCallVideoOrientation180;
+                break;
+            }
+            case webrtc::kVideoRotation_270: {
+                _orientation = OngoingCallVideoOrientation270;
+                break;
+            }
+            default: {
+                _orientation = OngoingCallVideoOrientation0;
+                break;
+            }
+        }
+    }
+    return self;
+}
+
+@end
+
+namespace {
+
+class GroupCallVideoSinkAdapter : public rtc::VideoSinkInterface<webrtc::VideoFrame> {
+public:
+    GroupCallVideoSinkAdapter(void (^frameReceived)(webrtc::VideoFrame const &)) {
+        _frameReceived = [frameReceived copy];
+    }
+
+    void OnFrame(const webrtc::VideoFrame& nativeVideoFrame) override {
+        @autoreleasepool {
+            if (_frameReceived) {
+                _frameReceived(nativeVideoFrame);
+            }
+        }
+    }
+
+private:
+    void (^_frameReceived)(webrtc::VideoFrame const &);
+};
+
+}
+
+@interface GroupCallVideoSink : NSObject {
+    std::shared_ptr<GroupCallVideoSinkAdapter> _adapter;
+}
+
+@end
+
+@implementation GroupCallVideoSink
+
+- (instancetype)initWithSink:(void (^_Nonnull)(CallVideoFrameData * _Nonnull))sink {
+    self = [super init];
+    if (self != nil) {
+        void (^storedSink)(CallVideoFrameData * _Nonnull) = [sink copy];
+
+        _adapter.reset(new GroupCallVideoSinkAdapter(^(webrtc::VideoFrame const &videoFrame) {
+            id<CallVideoFrameBuffer> mappedBuffer = nil;
+
+            if (videoFrame.video_frame_buffer()->type() == webrtc::VideoFrameBuffer::Type::kNative) {
+                id<RTC_OBJC_TYPE(RTCVideoFrameBuffer)> nativeBuffer = static_cast<webrtc::ObjCFrameBuffer *>(videoFrame.video_frame_buffer().get())->wrapped_frame_buffer();
+                if ([nativeBuffer isKindOfClass:[RTC_OBJC_TYPE(RTCCVPixelBuffer) class]]) {
+                    RTCCVPixelBuffer *pixelBuffer = (RTCCVPixelBuffer *)nativeBuffer;
+                    mappedBuffer = [[CallVideoFrameNativePixelBuffer alloc] initWithPixelBuffer:pixelBuffer.pixelBuffer];
+                }
+            } else if (videoFrame.video_frame_buffer()->type() == webrtc::VideoFrameBuffer::Type::kNV12) {
+                rtc::scoped_refptr<webrtc::NV12BufferInterface> nv12Buffer = (webrtc::NV12BufferInterface *)videoFrame.video_frame_buffer().get();
+                mappedBuffer = [[CallVideoFrameNV12Buffer alloc] initWithBuffer:nv12Buffer];
+            } else if (videoFrame.video_frame_buffer()->type() == webrtc::VideoFrameBuffer::Type::kI420) {
+                rtc::scoped_refptr<webrtc::I420BufferInterface> i420Buffer = (webrtc::I420BufferInterface *)videoFrame.video_frame_buffer().get();
+                mappedBuffer = [[CallVideoFrameI420Buffer alloc] initWithBuffer:i420Buffer];
+            }
+
+            if (storedSink && mappedBuffer) {
+                storedSink([[CallVideoFrameData alloc] initWithBuffer:mappedBuffer frame:videoFrame]);
+            }
+        }));
+    }
+    return self;
+}
+
+- (std::shared_ptr<rtc::VideoSinkInterface<webrtc::VideoFrame>>)sink {
+    return _adapter;
+}
+
+@end
+
 @interface OngoingCallThreadLocalContextVideoCapturer () {
     bool _keepLandscape;
     std::shared_ptr<std::vector<uint8_t>> _croppingBuffer;
+
+    int _nextSinkId;
+    NSMutableDictionary<NSNumber *, GroupCallVideoSink *> *_sinks;
 }
 
 @end
@@ -312,6 +521,32 @@ tgcalls::VideoCaptureInterfaceObject *GetVideoCaptureAssumingSameThread(tgcalls:
 
 #endif
 
+- (GroupCallDisposable * _Nonnull)addVideoOutput:(void (^_Nonnull)(CallVideoFrameData * _Nonnull))sink {
+    int sinkId = _nextSinkId;
+    _nextSinkId += 1;
+
+    GroupCallVideoSink *storedSink = [[GroupCallVideoSink alloc] initWithSink:sink];
+    _sinks[@(sinkId)] = storedSink;
+
+    auto sinkReference = [storedSink sink];
+
+    tgcalls::StaticThreads::getThreads()->getMediaThread()->PostTask(RTC_FROM_HERE, [interface = _interface, sinkReference]() {
+        interface->setOutput(sinkReference);
+    });
+
+    __weak OngoingCallThreadLocalContextVideoCapturer *weakSelf = self;
+    return [[GroupCallDisposable alloc] initWithBlock:^{
+        dispatch_async(dispatch_get_main_queue(), ^{
+            __strong OngoingCallThreadLocalContextVideoCapturer *strongSelf = weakSelf;
+            if (!strongSelf) {
+                return;
+            }
+
+            [strongSelf->_sinks removeObjectForKey:@(sinkId)];
+        });
+    }];
+}
+
 - (void)switchVideoInput:(NSString * _Nonnull)deviceId {
     std::string resolvedId = deviceId.UTF8String;
     if (_keepLandscape) {
@@ -1076,6 +1311,9 @@ private:
     OngoingCallThreadLocalContextVideoCapturer *_videoCapturer;
 
     void (^_networkStateUpdated)(GroupCallNetworkState);
+
+    int _nextSinkId;
+    NSMutableDictionary<NSNumber *, GroupCallVideoSink *> *_sinks;
 }
 
 @end
@@ -1097,6 +1335,8 @@ private:
     if (self != nil) {
         _queue = queue;
 
+        _sinks = [[NSMutableDictionary alloc] init];
+
         _networkStateUpdated = [networkStateUpdated copy];
         _videoCapturer = videoCapturer;
 
@@ -1477,6 +1717,31 @@ private:
     }
 }
 
+- (GroupCallDisposable * _Nonnull)addVideoOutputWithEndpointId:(NSString * _Nonnull)endpointId sink:(void (^_Nonnull)(CallVideoFrameData * _Nonnull))sink {
+    int sinkId = _nextSinkId;
+    _nextSinkId += 1;
+
+    GroupCallVideoSink *storedSink = [[GroupCallVideoSink alloc] initWithSink:sink];
+    _sinks[@(sinkId)] = storedSink;
+
+    if (_instance) {
+        _instance->addIncomingVideoOutput(endpointId.UTF8String, [storedSink sink]);
+    }
+
+    __weak GroupCallThreadLocalContext *weakSelf = self;
+    id<OngoingCallThreadLocalContextQueueWebrtc> queue = _queue;
+    return [[GroupCallDisposable alloc] initWithBlock:^{
+        [queue dispatch:^{
+            __strong GroupCallThreadLocalContext *strongSelf = weakSelf;
+            if (!strongSelf) {
+                return;
+            }
+
+            [strongSelf->_sinks removeObjectForKey:@(sinkId)];
+        }];
+    }];
+}
+
 - (void)addExternalAudioData:(NSData * _Nonnull)data {
     if (_instance) {
         std::vector<uint8_t> samples;
@@ -1 +1 @@
-Subproject commit ed88e80ae5cc57538c4f515496d4393e7d84e685
+Subproject commit ef796349808b187b80b75ea1876b940f2882fcbb