Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios

commit 36ea9c8691
Ilya Laktyushin, 2021-07-27 23:18:51 +03:00
10 changed files with 84 additions and 38 deletions

View File

@@ -6564,4 +6564,8 @@ Sorry for the inconvenience.";
"VoiceChat.VideoPreviewContinue" = "Continue";
"VoiceChat.VideoPreviewShareScreenInfo" = "Everything on your screen\nwill be shared";
"Gallery.SaveToGallery" = "Save to Gallery";
"Gallery.VideoSaved" = "Video Saved";
"Gallery.WaitForVideoDownoad" = "Please wait for the video to be fully downloaded.";
"VoiceChat.VideoParticipantsLimitExceededExtended" = "The voice chat is over %@ members.\nNew participants only have access to audio stream. ";

View File

@@ -2090,7 +2090,7 @@ final class UniversalVideoGalleryItemNode: ZoomableContentGalleryItemNode {
c.setItems(strongSelf.contextMenuSpeedItems())
})))
if let (message, maybeFile, isWebpage) = strongSelf.contentInfo(), let file = maybeFile, !isWebpage {
items.append(.action(ContextMenuActionItem(text: "Save to Gallery", icon: { theme in generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/Download"), color: theme.actionSheet.primaryTextColor) }, action: { _, f in
items.append(.action(ContextMenuActionItem(text: strongSelf.presentationData.strings.Gallery_SaveToGallery, icon: { theme in generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/Download"), color: theme.actionSheet.primaryTextColor) }, action: { _, f in
f(.default)
if let strongSelf = self {
@@ -2104,22 +2104,20 @@ final class UniversalVideoGalleryItemNode: ZoomableContentGalleryItemNode {
guard let controller = strongSelf.galleryController() else {
return
}
-//TODO:localize
-controller.present(UndoOverlayController(presentationData: strongSelf.presentationData, content: .mediaSaved(text: "Video Saved"), elevatedLayout: false, animateInAsReplacement: false, action: { _ in return false }), in: .window(.root))
+controller.present(UndoOverlayController(presentationData: strongSelf.presentationData, content: .mediaSaved(text: strongSelf.presentationData.strings.Gallery_VideoSaved), elevatedLayout: false, animateInAsReplacement: false, action: { _ in return false }), in: .window(.root))
})
default:
guard let controller = strongSelf.galleryController() else {
return
}
-//TODO:localize
-controller.present(textAlertController(context: strongSelf.context, title: nil, text: "Please wait for the video to be fully downloaded.", actions: [TextAlertAction(type: .defaultAction, title: strongSelf.presentationData.strings.Common_OK, action: {
+controller.present(textAlertController(context: strongSelf.context, title: nil, text: strongSelf.presentationData.strings.Gallery_WaitForVideoDownoad, actions: [TextAlertAction(type: .defaultAction, title: strongSelf.presentationData.strings.Common_OK, action: {
})]), in: .window(.root))
}
}
})))
}
if strongSelf.canDelete() {
items.append(.action(ContextMenuActionItem(text: "Delete", textColor: .destructive, icon: { theme in generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/Delete"), color: theme.contextMenu.destructiveColor) }, action: { _, f in
items.append(.action(ContextMenuActionItem(text: strongSelf.presentationData.strings.Common_Delete, textColor: .destructive, icon: { theme in generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/Delete"), color: theme.contextMenu.destructiveColor) }, action: { _, f in
f(.default)
if let strongSelf = self {

View File

@@ -117,7 +117,8 @@ private func peerAutoremoveSetupEntries(peer: Peer?, presentationData: Presentat
var availableValues: [Int32] = [
Int32.max,
24 * 60 * 60,
-24 * 60 * 60 * 7
+24 * 60 * 60 * 7,
+24 * 60 * 60 * 31,
]
if isDebug {
availableValues[1] = 5

View File

@@ -130,7 +130,7 @@ class PeerRemoveTimeoutItemNode: ListViewItemNode, ItemListItemNode {
self.disabledOverlayNode = ASDisplayNode()
-self.titleNodes = (0 ..< 3).map { _ in
+self.titleNodes = (0 ..< 4).map { _ in
return TextNode()
}
@@ -150,9 +150,9 @@ class PeerRemoveTimeoutItemNode: ListViewItemNode, ItemListItemNode {
sliderView.lineSize = 2.0
sliderView.dotSize = 5.0
sliderView.minimumValue = 0.0
-sliderView.maximumValue = 2.0
+sliderView.maximumValue = CGFloat(self.titleNodes.count - 1)
sliderView.startValue = 0.0
-sliderView.positionsCount = 3
+sliderView.positionsCount = self.titleNodes.count
sliderView.useLinesForPositions = true
sliderView.minimumUndottedValue = 0
sliderView.disablesInteractiveTransitionGestureRecognizer = true
@@ -195,7 +195,7 @@ class PeerRemoveTimeoutItemNode: ListViewItemNode, ItemListItemNode {
if item.availableValues[index] == Int32.max {
text = item.presentationData.strings.AutoremoveSetup_TimerValueNever
} else {
-text = item.presentationData.strings.AutoremoveSetup_TimerValueAfter(timeIntervalString(strings: item.presentationData.strings, value: item.availableValues[index])).string
+text = timeIntervalString(strings: item.presentationData.strings, value: item.availableValues[index])
}
return makeLayout(TextNodeLayoutArguments(attributedString: NSAttributedString(string: text, font: Font.regular(13.0), textColor: item.presentationData.theme.list.itemSecondaryTextColor), maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: 100.0, height: 100.0)))
}
@@ -264,19 +264,24 @@ class PeerRemoveTimeoutItemNode: ListViewItemNode, ItemListItemNode {
strongSelf.maskNode.frame = strongSelf.backgroundNode.frame.insetBy(dx: params.leftInset, dy: 0.0)
strongSelf.topStripeNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -min(insets.top, separatorHeight)), size: CGSize(width: layoutSize.width, height: separatorHeight))
strongSelf.bottomStripeNode.frame = CGRect(origin: CGPoint(x: bottomStripeInset, y: contentSize.height + bottomStripeOffset), size: CGSize(width: layoutSize.width - bottomStripeInset, height: separatorHeight))
-zip(0 ..< titleLayouts.count, titleLayouts).forEach { index, layoutAndApply in
-let textNode = layoutAndApply.1()
-let size = layoutAndApply.0.size
-switch index {
-case 0:
-textNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 13.0), size: size)
-case 1:
-textNode.frame = CGRect(origin: CGPoint(x: floor((params.width - size.width) / 2.0), y: 13.0), size: size)
-default:
-textNode.frame = CGRect(origin: CGPoint(x: params.width - leftInset - size.width, y: 13.0), size: size)
+let usableWidth = params.width - (leftInset + 7.0) * 2.0
+for i in 0 ..< titleLayouts.count {
+let textNode = titleLayouts[i].1()
+let size = titleLayouts[i].0.size
+let nextX: CGFloat
+if i == 0 {
+nextX = leftInset
+} else if i == titleLayouts.count - 1 {
+nextX = params.width - leftInset - size.width
+} else {
+nextX = floor(leftInset + 7.0 + CGFloat(i) * usableWidth / CGFloat(titleLayouts.count - 1) - size.width / 2.0)
+}
+textNode.frame = CGRect(origin: CGPoint(x: nextX, y: 13.0), size: size)
}
if let sliderView = strongSelf.sliderView {
@@ -301,9 +306,8 @@ class PeerRemoveTimeoutItemNode: ListViewItemNode, ItemListItemNode {
if firstTime {
sliderView.value = value
}
-let sliderInset: CGFloat = leftInset
-sliderView.frame = CGRect(origin: CGPoint(x: sliderInset, y: 38.0), size: CGSize(width: params.width - sliderInset * 2.0, height: 44.0))
+sliderView.frame = CGRect(origin: CGPoint(x: leftInset, y: 38.0), size: CGSize(width: params.width - leftInset * 2.0, height: 44.0))
}
}
})
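
The replacement layout spreads any number of labels across the slider: the first sits flush left, the last flush right, and each interior label is centered at i / (count - 1) of the padded usable width. A standalone sketch of that rule — the 7.0 padding and the floor() rounding come from the hunk above, while the function itself is illustrative:

import Foundation
import CoreGraphics

// Returns the x origin for each label, given the measured label widths.
func labelOrigins(widths: [CGFloat], totalWidth: CGFloat, leftInset: CGFloat) -> [CGFloat] {
    let usableWidth = totalWidth - (leftInset + 7.0) * 2.0
    return widths.enumerated().map { (i, width) -> CGFloat in
        if i == 0 {
            return leftInset // first label: flush with the left inset
        } else if i == widths.count - 1 {
            return totalWidth - leftInset - width // last label: flush right
        } else {
            // interior labels: centered on an even fraction of the usable width
            return floor(leftInset + 7.0 + CGFloat(i) * usableWidth / CGFloat(widths.count - 1) - width / 2.0)
        }
    }
}

Deriving maximumValue and positionsCount from titleNodes.count in the earlier hunks is what keeps the slider, this loop, and the new fourth timeout value in sync without any remaining hard-coded threes.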

View File

@@ -57,7 +57,12 @@ final class GroupVideoNode: ASDisplayNode, PreviewVideoNode {
self.backdropVideoView = backdropVideoView
super.init()
+if let backdropVideoView = backdropVideoView {
+self.backdropVideoViewContainer.addSubview(backdropVideoView)
+self.view.addSubview(self.backdropVideoViewContainer)
+}
self.videoViewContainer.addSubview(self.videoView)
self.addSubnode(self.sourceContainerNode)
self.containerNode.view.addSubview(self.videoViewContainer)
@@ -299,7 +304,20 @@ final class GroupVideoNode: ASDisplayNode, PreviewVideoNode {
let normalizedVideoSize = rotatedVideoFrame.size.aspectFilled(CGSize(width: 1080.0, height: 1080.0))
-self.backdropVideoView?.updateIsEnabled(self.isEnabled && self.isBlurEnabled)
+let effectiveBlurEnabled = self.isEnabled && self.isBlurEnabled
+if effectiveBlurEnabled {
+self.backdropVideoView?.updateIsEnabled(true)
+}
+transition.updatePosition(layer: backdropVideoView.layer, position: rotatedVideoFrame.center, force: true, completion: { [weak self] value in
+guard let strongSelf = self, value else {
+return
+}
+if !(strongSelf.isEnabled && strongSelf.isBlurEnabled) {
+strongSelf.backdropVideoView?.updateIsEnabled(false)
+}
+})
transition.updateBounds(layer: backdropVideoView.layer, bounds: CGRect(origin: CGPoint(), size: normalizedVideoSize))
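
The pattern above keeps the blurred backdrop alive for the whole animated reposition: blur is switched on before the transition starts and switched off only in the completion, and only if it is still unwanted at that point. A minimal sketch of the same idea, using UIView.animate as a stand-in for the transition type used here (the function and its parameters are illustrative, not from this commit):

import UIKit

// Keep an expensive effect enabled while an animation runs; tear it
// down only once the animation has actually finished.
func animateMove(of view: UIView, to center: CGPoint, blurWanted: Bool,
                 setBlurEnabled: @escaping (Bool) -> Void) {
    if blurWanted {
        setBlurEnabled(true) // enable up front so the move renders blurred
    }
    UIView.animate(withDuration: 0.3, animations: {
        view.center = center
    }, completion: { finished in
        guard finished else { return }
        if !blurWanted {
            setBlurEnabled(false) // safe to disable once the move settles
        }
    })
}

Note that the real completion re-reads isEnabled and isBlurEnabled rather than capturing them, since either flag can change while the animation is in flight.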

View File

@@ -3504,6 +3504,8 @@ public final class VoiceChatController: ViewController {
let videoCapturer = OngoingCallVideoCapturer()
let input = videoCapturer.video()
if let videoView = strongSelf.videoRenderingContext.makeView(input: input, blur: false) {
+videoView.updateIsEnabled(true)
let cameraNode = GroupVideoNode(videoView: videoView, backdropVideoView: nil)
let controller = VoiceChatCameraPreviewController(sharedContext: strongSelf.context.sharedContext, cameraNode: cameraNode, shareCamera: { [weak self] _, unmuted in
if let strongSelf = self {

View File

@@ -38,10 +38,18 @@ private var fadeImage: UIImage? = {
return generateImage(CGSize(width: fadeHeight, height: fadeHeight), rotatedContext: { size, context in
let bounds = CGRect(origin: CGPoint(), size: size)
context.clear(bounds)
-let colorsArray = [fadeColor.withAlphaComponent(0.0).cgColor, fadeColor.cgColor] as CFArray
-var locations: [CGFloat] = [1.0, 0.0]
-let gradient = CGGradient(colorsSpace: deviceColorSpace, colors: colorsArray, locations: &locations)!
+let stepCount = 10
+var colors: [CGColor] = []
+var locations: [CGFloat] = []
+for i in 0 ... stepCount {
+let t = CGFloat(i) / CGFloat(stepCount)
+colors.append(fadeColor.withAlphaComponent((1.0 - t * t) * 0.7).cgColor)
+locations.append(t)
+}
+let gradient = CGGradient(colorsSpace: deviceColorSpace, colors: colors as CFArray, locations: &locations)!
context.drawLinearGradient(gradient, start: CGPoint(), end: CGPoint(x: 0.0, y: size.height), options: CGGradientDrawingOptions())
})
}()
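
Both gradient rewrites in this commit — this one and the variant in VoiceChatMainStageNode below — replace a two-stop linear ramp with eleven explicit stops sampled from a quadratic curve, which avoids the hard edge a linear alpha fade tends to show over video. The two hunks differ only in the easing function, so the construction factors out naturally; a sketch of that shared helper (the helper and its names are mine, the curves are the ones from the hunks):

import UIKit

// Sample an easing curve at stepCount + 1 points and build a CGGradient
// from explicit stops, rather than letting CoreGraphics interpolate
// linearly between just two stops.
func easedFadeGradient(color: UIColor, stepCount: Int = 10,
                       alpha: (CGFloat) -> CGFloat) -> CGGradient? {
    var colors: [CGColor] = []
    var locations: [CGFloat] = []
    for i in 0 ... stepCount {
        let t = CGFloat(i) / CGFloat(stepCount)
        colors.append(color.withAlphaComponent(alpha(t)).cgColor)
        locations.append(t)
    }
    return CGGradient(colorsSpace: CGColorSpaceCreateDeviceRGB(),
                      colors: colors as CFArray, locations: &locations)
}

// The curve above: alpha 0.7 at t = 0, easing out to transparent at t = 1.
let listFade = easedFadeGradient(color: .black) { t in (1.0 - t * t) * 0.7 }
// The main-stage top fade below: transparent at t = 0, easing in to opaque.
let stageFade = easedFadeGradient(color: .black) { t in t * t }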

View File

@@ -161,10 +161,18 @@ final class VoiceChatMainStageNode: ASDisplayNode {
if let image = generateImage(CGSize(width: fadeHeight, height: fadeHeight), rotatedContext: { size, context in
let bounds = CGRect(origin: CGPoint(), size: size)
context.clear(bounds)
-let colorsArray = [fadeColor.cgColor, fadeColor.withAlphaComponent(0.0).cgColor] as CFArray
-var locations: [CGFloat] = [1.0, 0.0]
-let gradient = CGGradient(colorsSpace: deviceColorSpace, colors: colorsArray, locations: &locations)!
+let stepCount = 10
+var colors: [CGColor] = []
+var locations: [CGFloat] = []
+for i in 0 ... stepCount {
+let t = CGFloat(i) / CGFloat(stepCount)
+colors.append(fadeColor.withAlphaComponent(t * t).cgColor)
+locations.append(t)
+}
+let gradient = CGGradient(colorsSpace: deviceColorSpace, colors: colors as CFArray, locations: &locations)!
context.drawLinearGradient(gradient, start: CGPoint(), end: CGPoint(x: 0.0, y: size.height), options: CGGradientDrawingOptions())
}) {
self.topFadeNode.backgroundColor = UIColor(patternImage: image)
@@ -181,6 +189,7 @@ final class VoiceChatMainStageNode: ASDisplayNode {
let colorsArray = [fadeColor.withAlphaComponent(0.0).cgColor, fadeColor.cgColor] as CFArray
var locations: [CGFloat] = [1.0, 0.0]
let gradient = CGGradient(colorsSpace: deviceColorSpace, colors: colorsArray, locations: &locations)!
+context.drawLinearGradient(gradient, start: CGPoint(), end: CGPoint(x: 0.0, y: size.height), options: CGGradientDrawingOptions())
}) {
self.bottomGradientNode.backgroundColor = UIColor(patternImage: image)

View File

@@ -446,6 +446,7 @@ private:
_interface = interface;
_isProcessingCustomSampleBuffer = [[IsProcessingCustomSampleBufferFlag alloc] init];
_croppingBuffer = std::make_shared<std::vector<uint8_t>>();
+_sinks = [[NSMutableDictionary alloc] init];
}
return self;
}
@@ -460,6 +461,7 @@ private:
resolvedId += std::string(":landscape");
}
_interface = tgcalls::VideoCaptureInterface::Create(tgcalls::StaticThreads::getThreads(), resolvedId);
+_sinks = [[NSMutableDictionary alloc] init];
}
return self;
}
@@ -473,7 +475,7 @@ tgcalls::VideoCaptureInterfaceObject *GetVideoCaptureAssumingSameThread(tgcalls:
}
+ (instancetype _Nonnull)capturerWithExternalSampleBufferProvider {
-std::shared_ptr<tgcalls::VideoCaptureInterface> interface = tgcalls::VideoCaptureInterface::Create(tgcalls::StaticThreads::getThreads(), ":ios_custom");
+std::shared_ptr<tgcalls::VideoCaptureInterface> interface = tgcalls::VideoCaptureInterface::Create(tgcalls::StaticThreads::getThreads(), ":ios_custom", true);
return [[OngoingCallThreadLocalContextVideoCapturer alloc] initWithInterface:interface];
}
#endif

@@ -1 +1 @@
-Subproject commit e8f7d439309abd4da1c15b97141c546295fcdcb4
+Subproject commit ce20405bf1aa732fc83c0057ae4deaa49681bb8e