diff --git a/Images.xcassets/Chat List/EmptyMasterDetailIcon.imageset/Contents.json b/Images.xcassets/Chat List/EmptyMasterDetailIcon.imageset/Contents.json new file mode 100644 index 0000000000..350a5ab12a --- /dev/null +++ b/Images.xcassets/Chat List/EmptyMasterDetailIcon.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "DetailLogoBlank@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/EmptyMasterDetailIcon.imageset/DetailLogoBlank@2x.png b/Images.xcassets/Chat List/EmptyMasterDetailIcon.imageset/DetailLogoBlank@2x.png new file mode 100644 index 0000000000..f0ee593041 Binary files /dev/null and b/Images.xcassets/Chat List/EmptyMasterDetailIcon.imageset/DetailLogoBlank@2x.png differ diff --git a/TelegramUI.xcodeproj/project.pbxproj b/TelegramUI.xcodeproj/project.pbxproj index dfc0bdc8f5..3a89babf68 100644 --- a/TelegramUI.xcodeproj/project.pbxproj +++ b/TelegramUI.xcodeproj/project.pbxproj @@ -130,7 +130,6 @@ D056CD781FF2A6EE00880D28 /* ChatMessageSwipeToReplyNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D056CD771FF2A6EE00880D28 /* ChatMessageSwipeToReplyNode.swift */; }; D056CD7A1FF3CC2A00880D28 /* ListMessagePlaybackOverlayNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D056CD791FF3CC2A00880D28 /* ListMessagePlaybackOverlayNode.swift */; }; D056CD7C1FF3E92C00880D28 /* DirectionalPanGestureRecognizer.swift in Sources */ = {isa = PBXBuildFile; fileRef = D056CD7B1FF3E92C00880D28 /* DirectionalPanGestureRecognizer.swift */; }; - D057C5402004215B00990762 /* Lottie.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D057C5412004215B00990762 /* Lottie.framework */; }; D057C5452004235000990762 /* mute.json in Resources */ = {isa = PBXBuildFile; fileRef = D057C5422004226C00990762 
/* mute.json */; }; D0642EFC1F3E1E7B00792790 /* ChatHistoryNavigationButtons.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0642EFB1F3E1E7B00792790 /* ChatHistoryNavigationButtons.swift */; }; D064EF871F69A06F00AC0398 /* MessageContentKind.swift in Sources */ = {isa = PBXBuildFile; fileRef = D064EF861F69A06F00AC0398 /* MessageContentKind.swift */; }; @@ -224,6 +223,8 @@ D0BCC3D420404CC7008126C2 /* ChatMessageActionSheetController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0BCC3D320404CC7008126C2 /* ChatMessageActionSheetController.swift */; }; D0BCC3D620404CD8008126C2 /* ChatMessageActionSheetControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0BCC3D520404CD8008126C2 /* ChatMessageActionSheetControllerNode.swift */; }; D0BDB09B1F79C658002ABF2F /* SaveToCameraRoll.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0BDB09A1F79C658002ABF2F /* SaveToCameraRoll.swift */; }; + D0BE303220601FFC00FBE6D8 /* LocationBroadcastActionSheetItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0BE303120601FFC00FBE6D8 /* LocationBroadcastActionSheetItem.swift */; }; + D0BE3037206139F500FBE6D8 /* ImageCompression.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0BE3036206139F500FBE6D8 /* ImageCompression.swift */; }; D0C0B5901EDB505E000F4D2C /* ActivityIndicator.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0C0B58F1EDB505E000F4D2C /* ActivityIndicator.swift */; }; D0C0B5921EDC5A3B000F4D2C /* LinkHighlightingNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0C0B5911EDC5A3B000F4D2C /* LinkHighlightingNode.swift */; }; D0C0B59B1EE019E5000F4D2C /* ChatSearchNavigationContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0C0B59A1EE019E5000F4D2C /* ChatSearchNavigationContentNode.swift */; }; @@ -246,6 +247,8 @@ D0CFBB951FD8B05000B65C0D /* OverlayInstantVideoDecoration.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0CFBB941FD8B05000B65C0D /* OverlayInstantVideoDecoration.swift */; }; 
D0CFBB971FD8B0F700B65C0D /* ChatBubbleInstantVideoDecoration.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0CFBB961FD8B0F700B65C0D /* ChatBubbleInstantVideoDecoration.swift */; }; D0D4345C1F97CEAA00CC1806 /* ProxySettingsController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0D4345B1F97CEAA00CC1806 /* ProxySettingsController.swift */; }; + D0DE5803205AEB7600C356A8 /* include in Resources */ = {isa = PBXBuildFile; fileRef = D0DE5802205AEB7600C356A8 /* include */; }; + D0DE5805205B202500C356A8 /* ScreenCaptureDetection.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0DE5804205B202500C356A8 /* ScreenCaptureDetection.swift */; }; D0DE66061F9A51E200EF4AE9 /* GalleryHiddenMediaManager.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0DE66051F9A51E200EF4AE9 /* GalleryHiddenMediaManager.swift */; }; D0DFD5E21FCE2BA50039B3B1 /* CalculatingCacheSizeItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0DFD5E11FCE2BA50039B3B1 /* CalculatingCacheSizeItem.swift */; }; D0E266FD1F66706500BFC79F /* ChatBubbleVideoDecoration.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0E266FC1F66706500BFC79F /* ChatBubbleVideoDecoration.swift */; }; @@ -712,7 +715,7 @@ D0EC6E041EB9F58900EBF1C3 /* SecretMediaPreviewController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00C7CDB1E3776E50080C3D5 /* SecretMediaPreviewController.swift */; }; D0EC6E051EB9F58900EBF1C3 /* SecretMediaPreviewControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D00C7CDD1E37770A0080C3D5 /* SecretMediaPreviewControllerNode.swift */; }; D0EC6E061EB9F58900EBF1C3 /* ChatDocumentGalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5B1D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift */; }; - D0EC6E071EB9F58900EBF1C3 /* ChatHoleGalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5C1D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift */; }; + D0EC6E071EB9F58900EBF1C3 /* ChatExternalFileGalleryItem.swift in Sources */ = {isa = 
PBXBuildFile; fileRef = D0F69E5C1D6B8BF90046BCD6 /* ChatExternalFileGalleryItem.swift */; }; D0EC6E081EB9F58900EBF1C3 /* ChatImageGalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5D1D6B8BF90046BCD6 /* ChatImageGalleryItem.swift */; }; D0EC6E0A1EB9F58900EBF1C3 /* ChatVideoGalleryItemScrubberView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5F1D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift */; }; D0EC6E0B1EB9F58900EBF1C3 /* ZoomableContentGalleryItemNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E601D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift */; }; @@ -1430,6 +1433,8 @@ D0BCC3D320404CC7008126C2 /* ChatMessageActionSheetController.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatMessageActionSheetController.swift; sourceTree = ""; }; D0BCC3D520404CD8008126C2 /* ChatMessageActionSheetControllerNode.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ChatMessageActionSheetControllerNode.swift; sourceTree = ""; }; D0BDB09A1F79C658002ABF2F /* SaveToCameraRoll.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = SaveToCameraRoll.swift; sourceTree = ""; }; + D0BE303120601FFC00FBE6D8 /* LocationBroadcastActionSheetItem.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = LocationBroadcastActionSheetItem.swift; sourceTree = ""; }; + D0BE3036206139F500FBE6D8 /* ImageCompression.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ImageCompression.swift; sourceTree = ""; }; D0BE383B1E7C3E51000079AF /* StickerPreviewController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = StickerPreviewController.swift; sourceTree = ""; }; D0BE931A1E92DFBA00DCC1E6 /* StickerPreviewControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = 
StickerPreviewControllerNode.swift; sourceTree = ""; }; D0C0B58F1EDB505E000F4D2C /* ActivityIndicator.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ActivityIndicator.swift; sourceTree = ""; }; @@ -1511,6 +1516,8 @@ D0DC35451DE35805000195EB /* MentionChatInputPanelItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MentionChatInputPanelItem.swift; sourceTree = ""; }; D0DC35491DE366CD000195EB /* CommandChatInputContextPanelNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CommandChatInputContextPanelNode.swift; sourceTree = ""; }; D0DC354B1DE366DE000195EB /* CommandChatInputPanelItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = CommandChatInputPanelItem.swift; sourceTree = ""; }; + D0DE5802205AEB7600C356A8 /* include */ = {isa = PBXFileReference; lastKnownFileType = folder; name = include; path = "third-party/FFmpeg-iOS/include"; sourceTree = SOURCE_ROOT; }; + D0DE5804205B202500C356A8 /* ScreenCaptureDetection.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = ScreenCaptureDetection.swift; sourceTree = ""; }; D0DE66051F9A51E200EF4AE9 /* GalleryHiddenMediaManager.swift */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.swift; path = GalleryHiddenMediaManager.swift; sourceTree = ""; }; D0DE76F61D91BA3D002B8809 /* GridHoleItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GridHoleItem.swift; sourceTree = ""; }; D0DE76FF1D92F1EB002B8809 /* ChatTitleView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatTitleView.swift; sourceTree = ""; }; @@ -1787,7 +1794,7 @@ D0F69E531D6B8BDA0046BCD6 /* GalleryItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = 
GalleryItemNode.swift; sourceTree = ""; }; D0F69E541D6B8BDA0046BCD6 /* GalleryPagerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GalleryPagerNode.swift; sourceTree = ""; }; D0F69E5B1D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatDocumentGalleryItem.swift; sourceTree = ""; }; - D0F69E5C1D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatHoleGalleryItem.swift; sourceTree = ""; }; + D0F69E5C1D6B8BF90046BCD6 /* ChatExternalFileGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatExternalFileGalleryItem.swift; sourceTree = ""; }; D0F69E5D1D6B8BF90046BCD6 /* ChatImageGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatImageGalleryItem.swift; sourceTree = ""; }; D0F69E5F1D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatVideoGalleryItemScrubberView.swift; sourceTree = ""; }; D0F69E601D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ZoomableContentGalleryItemNode.swift; sourceTree = ""; }; @@ -1856,7 +1863,6 @@ buildActionMask = 2147483647; files = ( D00ACA4B20222C280045D427 /* libtgvoip.framework in Frameworks */, - D057C5402004215B00990762 /* Lottie.framework in Frameworks */, D07BCBFE1F2B792300ED97AA /* LegacyComponents.framework in Frameworks */, D053B4371F1A9CA000E2D58A /* WebKit.framework in Frameworks */, D09E63B21F11289A003444CD /* PassKit.framework in Frameworks */, @@ -2419,6 +2425,7 @@ D0177B811DFAEA5400A5083A /* MediaNavigationAccessoryItemListNode.swift */, D09394122007F5BB00997F31 /* 
LocationBroadcastNavigationAccessoryPanel.swift */, D04B26EB20082EB50053A58C /* LocationBroadcastPanelWavesNode.swift */, + D0BE303120601FFC00FBE6D8 /* LocationBroadcastActionSheetItem.swift */, ); name = "Telegram Controller"; sourceTree = ""; @@ -2958,6 +2965,22 @@ name = Commands; sourceTree = ""; }; + D0DE5800205AEB5E00C356A8 /* Dependencies */ = { + isa = PBXGroup; + children = ( + D0DE5801205AEB6700C356A8 /* FFmpeg */, + ); + name = Dependencies; + sourceTree = ""; + }; + D0DE5801205AEB6700C356A8 /* FFmpeg */ = { + isa = PBXGroup; + children = ( + D0DE5802205AEB7600C356A8 /* include */, + ); + name = FFmpeg; + sourceTree = ""; + }; D0DE772C1D934DCB002B8809 /* List Items */ = { isa = PBXGroup; children = ( @@ -3629,7 +3652,7 @@ isa = PBXGroup; children = ( D0F69E5B1D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift */, - D0F69E5C1D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift */, + D0F69E5C1D6B8BF90046BCD6 /* ChatExternalFileGalleryItem.swift */, D0F69E5D1D6B8BF90046BCD6 /* ChatImageGalleryItem.swift */, D0F69E5F1D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift */, D0F69E601D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift */, @@ -3765,6 +3788,8 @@ D0BCC3D1203F0A6C008126C2 /* StringForMessageTimestampStatus.swift */, D0E8B8B8204477B600605593 /* SecretChatKeyVisualization.swift */, D0FA08BD20481EA300DD23FC /* Locale.swift */, + D0DE5804205B202500C356A8 /* ScreenCaptureDetection.swift */, + D0BE3036206139F500FBE6D8 /* ImageCompression.swift */, ); name = Utils; sourceTree = ""; @@ -3854,6 +3879,7 @@ D0FC40811D5B8E7400261D9D /* TelegramUI */ = { isa = PBXGroup; children = ( + D0DE5800205AEB5E00C356A8 /* Dependencies */, D07551891DDA4C7C0073E051 /* Legacy Components */, D0F69E911D6B8C8E0046BCD6 /* Utils */, D0B4AF891EC1132400D51FF6 /* Calls */, @@ -4028,6 +4054,7 @@ D0E9BA941F056F4C00F079A4 /* stp_card_amex_template@3x.png in Resources */, D0E9BA961F056F4C00F079A4 /* stp_card_applepay@3x.png in Resources */, D0F9720F1FFE4BD5002595C8 /* notification.caf 
in Resources */, + D0DE5803205AEB7600C356A8 /* include in Resources */, D0E9BA9A1F056F4C00F079A4 /* stp_card_cvc@3x.png in Resources */, D0E9BA921F056F4C00F079A4 /* stp_card_amex@3x.png in Resources */, D0E9BA9F1F056F4C00F079A4 /* stp_card_diners_template@2x.png in Resources */, @@ -4205,6 +4232,7 @@ D0EC6CF21EB9F58800EBF1C3 /* VoiceCallSettings.swift in Sources */, D0F8C397201774A200236FC5 /* FeedGroupingController.swift in Sources */, D0EC6CF31EB9F58800EBF1C3 /* PresentationThemeSettings.swift in Sources */, + D0BE303220601FFC00FBE6D8 /* LocationBroadcastActionSheetItem.swift in Sources */, D0EC6CF41EB9F58800EBF1C3 /* ManagedMediaId.swift in Sources */, D0CFBB971FD8B0F700B65C0D /* ChatBubbleInstantVideoDecoration.swift in Sources */, D0471B601EFEB5A70074D609 /* BotPaymentTextItemNode.swift in Sources */, @@ -4508,6 +4536,7 @@ D0EC6DA91EB9F58900EBF1C3 /* ChatPresentationInterfaceState.swift in Sources */, D0EC6DAA1EB9F58900EBF1C3 /* ChatPanelInterfaceInteraction.swift in Sources */, D00FF2091F4E2414006FA332 /* InstantPageSettingsNode.swift in Sources */, + D0BE3037206139F500FBE6D8 /* ImageCompression.swift in Sources */, D0EC6DAB1EB9F58900EBF1C3 /* ChatInterfaceStateAccessoryPanels.swift in Sources */, D0EC6DAC1EB9F58900EBF1C3 /* ChatInterfaceStateInputPanels.swift in Sources */, D056CD761FF2A30900880D28 /* ChatSwipeToReplyRecognizer.swift in Sources */, @@ -4647,7 +4676,7 @@ D007019C2029E8F2006B9E34 /* LegqacyICloudFileController.swift in Sources */, D0208AD61FA33D14001F0D5F /* RaiseToListenActivator.m in Sources */, D0EC6E061EB9F58900EBF1C3 /* ChatDocumentGalleryItem.swift in Sources */, - D0EC6E071EB9F58900EBF1C3 /* ChatHoleGalleryItem.swift in Sources */, + D0EC6E071EB9F58900EBF1C3 /* ChatExternalFileGalleryItem.swift in Sources */, D0EC6E081EB9F58900EBF1C3 /* ChatImageGalleryItem.swift in Sources */, D048EA891F4F297500188713 /* InstantPageSettingsFontFamilyItemNode.swift in Sources */, D0EC6E0A1EB9F58900EBF1C3 /* ChatVideoGalleryItemScrubberView.swift in 
Sources */, @@ -4796,6 +4825,7 @@ D0B85C231FF70BF400E795B4 /* AuthorizationSequenceAwaitingAccountResetController.swift in Sources */, D0EC6E6D1EB9F58900EBF1C3 /* ItemListStickerPackItem.swift in Sources */, D0EC6E6E1EB9F58900EBF1C3 /* ArhivedStickerPacksController.swift in Sources */, + D0DE5805205B202500C356A8 /* ScreenCaptureDetection.swift in Sources */, D0EC6E711EB9F58900EBF1C3 /* ThemeGalleryController.swift in Sources */, D0C0B5B11EE1C421000F4D2C /* ChatDateSelectionSheet.swift in Sources */, D0EC6E721EB9F58900EBF1C3 /* ThemeGalleryItem.swift in Sources */, @@ -5045,7 +5075,7 @@ SKIP_INSTALL = YES; SWIFT_INSTALL_OBJC_HEADER = YES; SWIFT_VERSION = 4.0; - USER_HEADER_SEARCH_PATHS = submodules/libtgvoip/webrtc_dsp; + USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/third-party/FFmpeg-iOS/include"; }; name = "Debug AppStore"; }; @@ -5155,7 +5185,7 @@ SKIP_INSTALL = YES; SWIFT_INSTALL_OBJC_HEADER = YES; SWIFT_VERSION = 4.0; - USER_HEADER_SEARCH_PATHS = submodules/libtgvoip/webrtc_dsp; + USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/third-party/FFmpeg-iOS/include"; }; name = "Release Hockeyapp Internal"; }; @@ -5193,7 +5223,7 @@ SKIP_INSTALL = YES; SWIFT_INSTALL_OBJC_HEADER = YES; SWIFT_VERSION = 4.0; - USER_HEADER_SEARCH_PATHS = submodules/libtgvoip/webrtc_dsp; + USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/third-party/FFmpeg-iOS/include"; }; name = "Debug Hockeyapp"; }; @@ -5230,7 +5260,7 @@ SKIP_INSTALL = YES; SWIFT_INSTALL_OBJC_HEADER = YES; SWIFT_VERSION = 4.0; - USER_HEADER_SEARCH_PATHS = submodules/libtgvoip/webrtc_dsp; + USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/third-party/FFmpeg-iOS/include"; }; name = "Release Hockeyapp"; }; @@ -5265,7 +5295,7 @@ SKIP_INSTALL = YES; SWIFT_INSTALL_OBJC_HEADER = YES; SWIFT_VERSION = 4.0; - USER_HEADER_SEARCH_PATHS = submodules/libtgvoip/webrtc_dsp; + USER_HEADER_SEARCH_PATHS = "$(PROJECT_DIR)/third-party/FFmpeg-iOS/include"; }; name = "Release AppStore"; }; diff --git a/TelegramUI/ActivityIndicator.swift 
b/TelegramUI/ActivityIndicator.swift index 3858c7b7f4..0d95e78805 100644 --- a/TelegramUI/ActivityIndicator.swift +++ b/TelegramUI/ActivityIndicator.swift @@ -31,7 +31,7 @@ enum ActivityIndicatorSpeed { final class ActivityIndicator: ASDisplayNode { var type: ActivityIndicatorType { didSet { - switch type { + switch self.type { case let .navigationAccent(theme): self.indicatorNode.image = PresentationResourcesRootController.navigationIndefiniteActivityImage(theme) case let .custom(color, diameter, lineWidth): @@ -40,6 +40,14 @@ final class ActivityIndicator: ASDisplayNode { } } + private var currentInHierarchy = false + + override var isHidden: Bool { + didSet { + self.updateAnimation() + } + } + private let speed: ActivityIndicatorSpeed private let indicatorNode: ASImageNode @@ -67,29 +75,48 @@ final class ActivityIndicator: ASDisplayNode { self.addSubnode(self.indicatorNode) } + private var isAnimating = false { + didSet { + if self.isAnimating != oldValue { + if self.isAnimating { + let basicAnimation = CABasicAnimation(keyPath: "transform.rotation.z") + basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionEaseInEaseOut) + switch self.speed { + case .regular: + basicAnimation.duration = 0.5 + case .slow: + basicAnimation.duration = 0.7 + } + basicAnimation.fromValue = NSNumber(value: Float(0.0)) + basicAnimation.toValue = NSNumber(value: Float.pi * 2.0) + basicAnimation.repeatCount = Float.infinity + basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionLinear) + basicAnimation.beginTime = 1.0 + + self.indicatorNode.layer.add(basicAnimation, forKey: "progressRotation") + } else { + self.indicatorNode.layer.removeAnimation(forKey: "progressRotation") + } + } + } + } + + private func updateAnimation() { + self.isAnimating = !self.isHidden && self.currentInHierarchy + } + override func willEnterHierarchy() { super.willEnterHierarchy() - let basicAnimation = CABasicAnimation(keyPath: "transform.rotation.z") - 
basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionEaseInEaseOut) - switch self.speed { - case .regular: - basicAnimation.duration = 0.5 - case .slow: - basicAnimation.duration = 0.7 - } - basicAnimation.fromValue = NSNumber(value: Float(0.0)) - basicAnimation.toValue = NSNumber(value: Float.pi * 2.0) - basicAnimation.repeatCount = Float.infinity - basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionLinear) - - self.indicatorNode.layer.add(basicAnimation, forKey: "progressRotation") + self.currentInHierarchy = true + self.updateAnimation() } override func didExitHierarchy() { super.didExitHierarchy() - self.indicatorNode.layer.removeAnimation(forKey: "progressRotation") + self.currentInHierarchy = false + self.updateAnimation() } override func calculateSizeThatFits(_ constrainedSize: CGSize) -> CGSize { diff --git a/TelegramUI/AuthorizationSequenceAwaitingAccountResetController.swift b/TelegramUI/AuthorizationSequenceAwaitingAccountResetController.swift index c5d8b651ca..0ddf0d4df3 100644 --- a/TelegramUI/AuthorizationSequenceAwaitingAccountResetController.swift +++ b/TelegramUI/AuthorizationSequenceAwaitingAccountResetController.swift @@ -31,7 +31,7 @@ final class AuthorizationSequenceAwaitingAccountResetController: ViewController self.strings = strings self.theme = theme - super.init(navigationBarTheme: AuthorizationSequenceController.navigationBarTheme(theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: AuthorizationSequenceController.navigationBarTheme(theme), strings: NavigationBarStrings(presentationStrings: strings))) self.statusBar.statusBarStyle = theme.statusBarStyle diff --git a/TelegramUI/AuthorizationSequenceCodeEntryController.swift b/TelegramUI/AuthorizationSequenceCodeEntryController.swift index 88bb98bb99..3e174c1c68 100644 --- a/TelegramUI/AuthorizationSequenceCodeEntryController.swift +++ b/TelegramUI/AuthorizationSequenceCodeEntryController.swift @@ 
-34,7 +34,7 @@ final class AuthorizationSequenceCodeEntryController: ViewController { self.strings = strings self.theme = theme - super.init(navigationBarTheme: AuthorizationSequenceController.navigationBarTheme(theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: AuthorizationSequenceController.navigationBarTheme(theme), strings: NavigationBarStrings(presentationStrings: strings))) self.hasActiveInput = true diff --git a/TelegramUI/AuthorizationSequenceController.swift b/TelegramUI/AuthorizationSequenceController.swift index 7d66b4cc31..4b25a40e8c 100644 --- a/TelegramUI/AuthorizationSequenceController.swift +++ b/TelegramUI/AuthorizationSequenceController.swift @@ -6,6 +6,7 @@ import TelegramCore import SwiftSignalKit import MtProtoKitDynamic import MessageUI +import CoreTelephony public final class AuthorizationSequenceController: NavigationController { static func navigationBarTheme(_ theme: AuthorizationTheme) -> NavigationBarTheme { @@ -28,7 +29,7 @@ public final class AuthorizationSequenceController: NavigationController { self.strings = strings self.theme = defaultAuthorizationTheme - super.init(nibName: nil, bundle: nil) + super.init(mode: .single, theme: NavigationControllerTheme(navigationBar: AuthorizationSequenceController.navigationBarTheme(theme), emptyAreaColor: .black, emptyDetailIcon: nil)) self.stateDisposable = (account.postbox.stateView() |> deliverOnMainQueue).start(next: { [weak self] view in self?.updateState(state: view.state ?? UnauthorizedAccountState(masterDatacenterId: account.masterDatacenterId, contents: .empty)) @@ -60,8 +61,30 @@ public final class AuthorizationSequenceController: NavigationController { controller.nextPressed = { [weak self] in if let strongSelf = self { let masterDatacenterId = strongSelf.account.masterDatacenterId + + var countryId: String? 
= nil + let networkInfo = CTTelephonyNetworkInfo() + if let carrier = networkInfo.subscriberCellularProvider { + countryId = carrier.isoCountryCode + } + + if countryId == nil { + countryId = (Locale.current as NSLocale).object(forKey: .countryCode) as? String + } + + var countryCode: Int32 = 1 + + if let countryId = countryId { + for (code, idAndName) in countryCodeToIdAndName { + if idAndName.0 == countryId { + countryCode = Int32(code) + break + } + } + } + let _ = (strongSelf.account.postbox.modify { modifier -> Void in - modifier.setState(UnauthorizedAccountState(masterDatacenterId: masterDatacenterId, contents: .phoneEntry(countryCode: 1, number: ""))) + modifier.setState(UnauthorizedAccountState(masterDatacenterId: masterDatacenterId, contents: .phoneEntry(countryCode: countryCode, number: ""))) }).start() } } diff --git a/TelegramUI/AuthorizationSequenceCountrySelectionController.swift b/TelegramUI/AuthorizationSequenceCountrySelectionController.swift index c4a8cbd9a7..e0d283d64b 100644 --- a/TelegramUI/AuthorizationSequenceCountrySelectionController.swift +++ b/TelegramUI/AuthorizationSequenceCountrySelectionController.swift @@ -358,7 +358,7 @@ final class AuthorizationSequenceCountrySelectionController: ViewController { self.innerNavigationController.navigationBar.isTranslucent = false self.innerNavigationController.navigationBar.titleTextAttributes = [NSAttributedStringKey.font: Font.semibold(17.0), NSAttributedStringKey.foregroundColor: theme.navigationBarTextColor] - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = theme.statusBarStyle diff --git a/TelegramUI/AuthorizationSequencePasswordEntryController.swift b/TelegramUI/AuthorizationSequencePasswordEntryController.swift index 84ab845b92..b6a187ce0b 100644 --- a/TelegramUI/AuthorizationSequencePasswordEntryController.swift +++ b/TelegramUI/AuthorizationSequencePasswordEntryController.swift @@ -43,7 +43,7 @@ final class 
AuthorizationSequencePasswordEntryController: ViewController { self.strings = strings self.theme = theme - super.init(navigationBarTheme: AuthorizationSequenceController.navigationBarTheme(theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: AuthorizationSequenceController.navigationBarTheme(theme), strings: NavigationBarStrings(presentationStrings: strings))) self.hasActiveInput = true diff --git a/TelegramUI/AuthorizationSequencePasswordRecoveryController.swift b/TelegramUI/AuthorizationSequencePasswordRecoveryController.swift index ac1bac4c0a..5d8bf475b1 100644 --- a/TelegramUI/AuthorizationSequencePasswordRecoveryController.swift +++ b/TelegramUI/AuthorizationSequencePasswordRecoveryController.swift @@ -33,7 +33,7 @@ final class AuthorizationSequencePasswordRecoveryController: ViewController { self.strings = strings self.theme = theme - super.init(navigationBarTheme: AuthorizationSequenceController.navigationBarTheme(theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: AuthorizationSequenceController.navigationBarTheme(theme), strings: NavigationBarStrings(presentationStrings: strings))) self.hasActiveInput = true diff --git a/TelegramUI/AuthorizationSequencePhoneEntryController.swift b/TelegramUI/AuthorizationSequencePhoneEntryController.swift index 711c27c65a..f2806be9d0 100644 --- a/TelegramUI/AuthorizationSequencePhoneEntryController.swift +++ b/TelegramUI/AuthorizationSequencePhoneEntryController.swift @@ -31,7 +31,7 @@ final class AuthorizationSequencePhoneEntryController: ViewController { self.strings = strings self.theme = theme - super.init(navigationBarTheme: AuthorizationSequenceController.navigationBarTheme(theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: AuthorizationSequenceController.navigationBarTheme(theme), strings: NavigationBarStrings(presentationStrings: strings))) self.hasActiveInput = true diff --git 
a/TelegramUI/AuthorizationSequenceSignUpController.swift b/TelegramUI/AuthorizationSequenceSignUpController.swift index 25678bc8e5..764eed5c27 100644 --- a/TelegramUI/AuthorizationSequenceSignUpController.swift +++ b/TelegramUI/AuthorizationSequenceSignUpController.swift @@ -31,7 +31,7 @@ final class AuthorizationSequenceSignUpController: ViewController { self.strings = strings self.theme = theme - super.init(navigationBarTheme: AuthorizationSequenceController.navigationBarTheme(theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: AuthorizationSequenceController.navigationBarTheme(theme), strings: NavigationBarStrings(presentationStrings: strings))) self.navigationItem.rightBarButtonItem = UIBarButtonItem(title: "Next", style: .done, target: self, action: #selector(self.nextPressed)) } diff --git a/TelegramUI/AuthorizationSequenceSplashController.swift b/TelegramUI/AuthorizationSequenceSplashController.swift index e40806e07e..084cbe6f77 100644 --- a/TelegramUI/AuthorizationSequenceSplashController.swift +++ b/TelegramUI/AuthorizationSequenceSplashController.swift @@ -19,7 +19,7 @@ final class AuthorizationSequenceSplashController: ViewController { self.theme = theme self.controller = RMIntroViewController(backroundColor: theme.backgroundColor, primaryColor: theme.primaryColor, accentColor: theme.accentColor, regularDotColor: theme.disclosureControlColor, highlightedDotColor: theme.accentColor) - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = theme.statusBarStyle diff --git a/TelegramUI/AvatarGalleryController.swift b/TelegramUI/AvatarGalleryController.swift index 95ed47218c..9735151706 100644 --- a/TelegramUI/AvatarGalleryController.swift +++ b/TelegramUI/AvatarGalleryController.swift @@ -131,7 +131,7 @@ class AvatarGalleryController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } 
self.replaceRootController = replaceRootController - super.init(navigationBarTheme: GalleryController.darkNavigationTheme) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: GalleryController.darkNavigationTheme, strings: NavigationBarStrings(presentationStrings: self.presentationData.strings))) let backItem = UIBarButtonItem(backButtonAppearanceWithTitle: self.presentationData.strings.Common_Back, target: self, action: #selector(self.donePressed)) self.navigationItem.leftBarButtonItem = backItem diff --git a/TelegramUI/AvatarNode.swift b/TelegramUI/AvatarNode.swift index d3641e70f8..d6494b8fa6 100644 --- a/TelegramUI/AvatarNode.swift +++ b/TelegramUI/AvatarNode.swift @@ -37,7 +37,7 @@ private let gradientColors: [NSArray] = [ ] private let grayscaleColors: NSArray = [ - UIColor(rgb: 0xefefef).cgColor, UIColor(rgb: 0xeeeeee).cgColor + UIColor(rgb: 0xb1b1b1).cgColor, UIColor(rgb: 0xcdcdcd).cgColor ] private let savedMessagesColors: NSArray = [ diff --git a/TelegramUI/BotCheckoutController.swift b/TelegramUI/BotCheckoutController.swift index 5cf9760dd4..e87bd49ef4 100644 --- a/TelegramUI/BotCheckoutController.swift +++ b/TelegramUI/BotCheckoutController.swift @@ -30,7 +30,7 @@ final class BotCheckoutController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/BotCheckoutControllerNode.swift b/TelegramUI/BotCheckoutControllerNode.swift index 4bdbbc1e61..64025b5e54 100644 --- a/TelegramUI/BotCheckoutControllerNode.swift +++ b/TelegramUI/BotCheckoutControllerNode.swift @@ -651,6 +651,8 @@ final class BotCheckoutControllerNode: 
ItemListControllerNode, controller.delegate = strongSelf if let window = strongSelf.view.window { strongSelf.applePayController = controller + controller.popoverPresentationController?.sourceView = window + controller.popoverPresentationController?.sourceRect = CGRect(origin: CGPoint(x: window.bounds.width / 2.0, y: window.bounds.size.height - 1.0), size: CGSize(width: 1.0, height: 1.0)) window.rootViewController?.present(controller, animated: true) } } diff --git a/TelegramUI/BotCheckoutInfoController.swift b/TelegramUI/BotCheckoutInfoController.swift index f1e9f8bf45..cf9971ec41 100644 --- a/TelegramUI/BotCheckoutInfoController.swift +++ b/TelegramUI/BotCheckoutInfoController.swift @@ -41,7 +41,7 @@ final class BotCheckoutInfoController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style self.doneItem = UIBarButtonItem(title: self.presentationData.strings.Common_Done, style: .done, target: self, action: #selector(self.donePressed)) diff --git a/TelegramUI/BotCheckoutNativeCardEntryController.swift b/TelegramUI/BotCheckoutNativeCardEntryController.swift index 6603ebb698..15a80f4c26 100644 --- a/TelegramUI/BotCheckoutNativeCardEntryController.swift +++ b/TelegramUI/BotCheckoutNativeCardEntryController.swift @@ -47,7 +47,7 @@ final class BotCheckoutNativeCardEntryController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: 
self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style self.doneItem = UIBarButtonItem(title: self.presentationData.strings.Common_Done, style: .done, target: self, action: #selector(self.donePressed)) diff --git a/TelegramUI/BotCheckoutPasswordEntryController.swift b/TelegramUI/BotCheckoutPasswordEntryController.swift index e090ccc5fc..4ef6e9dbfd 100644 --- a/TelegramUI/BotCheckoutPasswordEntryController.swift +++ b/TelegramUI/BotCheckoutPasswordEntryController.swift @@ -19,18 +19,18 @@ private final class BotCheckoutPasswordAlertActionNode: HighlightableButtonNode let action: BotCheckoutPasswordAlertAction - init(action: BotCheckoutPasswordAlertAction) { + init(theme: PresentationTheme, action: BotCheckoutPasswordAlertAction) { self.backgroundNode = ASDisplayNode() self.backgroundNode.isLayerBacked = true - self.backgroundNode.backgroundColor = UIColor(rgb: 0xe0e5e6) + self.backgroundNode.backgroundColor = theme.actionSheet.opaqueItemHighlightedBackgroundColor self.backgroundNode.alpha = 0.0 self.action = action super.init() - self.setTitle(action.title, with: Font.regular(17.0), with: UIColor(rgb: 0x007ee5), for: []) - self.setTitle(action.title, with: Font.regular(17.0), with: UIColor(rgb: 0xb3b3b3), for: [.disabled]) + self.setTitle(action.title, with: Font.regular(17.0), with: theme.actionSheet.controlAccentColor, for: []) + self.setTitle(action.title, with: Font.regular(17.0), with: theme.actionSheet.disabledActionTextColor, for: [.disabled]) self.highligthedChanged = { [weak self] value in if let strongSelf = self { @@ -90,14 +90,14 @@ private final class BotCheckoutPasswordAlertContentNode: AlertContentNode { private let hapticFeedback = HapticFeedback() - init(account: Account, strings: PresentationStrings, cardTitle: String, period: Int32, requiresBiometrics: Bool, cancel: @escaping () -> Void, completion: @escaping (TemporaryTwoStepPasswordToken) -> Void) { + init(account: Account, theme: 
PresentationTheme, strings: PresentationStrings, cardTitle: String, period: Int32, requiresBiometrics: Bool, cancel: @escaping () -> Void, completion: @escaping (TemporaryTwoStepPasswordToken) -> Void) { self.account = account self.period = period self.requiresBiometrics = requiresBiometrics self.completion = completion let titleNode = ASTextNode() - titleNode.attributedText = NSAttributedString(string: strings.Checkout_PasswordEntry_Title, font: Font.semibold(17.0), textColor: .black, paragraphAlignment: .center) + titleNode.attributedText = NSAttributedString(string: strings.Checkout_PasswordEntry_Title, font: Font.semibold(17.0), textColor: theme.actionSheet.primaryTextColor, paragraphAlignment: .center) titleNode.displaysAsynchronously = false titleNode.isLayerBacked = true titleNode.maximumNumberOfLines = 1 @@ -105,20 +105,20 @@ private final class BotCheckoutPasswordAlertContentNode: AlertContentNode { self.titleNode = titleNode self.textNode = ASTextNode() - self.textNode.attributedText = NSAttributedString(string: strings.Checkout_PasswordEntry_Text(cardTitle).0, font: Font.regular(13.0), textColor: .black, paragraphAlignment: .center) + self.textNode.attributedText = NSAttributedString(string: strings.Checkout_PasswordEntry_Text(cardTitle).0, font: Font.regular(13.0), textColor: theme.actionSheet.primaryTextColor, paragraphAlignment: .center) self.textNode.displaysAsynchronously = false self.textNode.isLayerBacked = true self.actionNodesSeparator = ASDisplayNode() self.actionNodesSeparator.isLayerBacked = true - self.actionNodesSeparator.backgroundColor = UIColor(rgb: 0xc9cdd7) + self.actionNodesSeparator.backgroundColor = theme.actionSheet.opaqueItemSeparatorColor - self.cancelActionNode = BotCheckoutPasswordAlertActionNode(action: BotCheckoutPasswordAlertAction(title: strings.Common_Cancel, action: { + self.cancelActionNode = BotCheckoutPasswordAlertActionNode(theme: theme, action: BotCheckoutPasswordAlertAction(title: strings.Common_Cancel, action: { 
cancel() })) var doneImpl: (() -> Void)? - self.doneActionNode = BotCheckoutPasswordAlertActionNode(action: BotCheckoutPasswordAlertAction(title: strings.Checkout_PasswordEntry_Pay, action: { + self.doneActionNode = BotCheckoutPasswordAlertActionNode(theme: theme, action: BotCheckoutPasswordAlertAction(title: strings.Checkout_PasswordEntry_Pay, action: { doneImpl?() })) @@ -129,7 +129,7 @@ private final class BotCheckoutPasswordAlertContentNode: AlertContentNode { for _ in 0 ..< self.actionNodes.count - 1 { let separatorNode = ASDisplayNode() separatorNode.isLayerBacked = true - separatorNode.backgroundColor = UIColor(rgb: 0xc9cdd7) + separatorNode.backgroundColor = theme.actionSheet.opaqueItemSeparatorColor actionVerticalSeparators.append(separatorNode) } } @@ -140,13 +140,13 @@ private final class BotCheckoutPasswordAlertContentNode: AlertContentNode { self.textFieldNodeBackground.displayWithoutProcessing = true self.textFieldNodeBackground.image = generateImage(CGSize(width: 4.0, height: 4.0), rotatedContext: { size, context in context.clear(CGRect(origin: CGPoint(), size: size)) - context.setStrokeColor(UIColor.black.cgColor) + context.setStrokeColor(theme.actionSheet.primaryTextColor.cgColor) context.setLineWidth(UIScreenPixel) context.stroke(CGRect(origin: CGPoint(), size: size)) })?.stretchableImage(withLeftCapWidth: 2, topCapHeight: 2) self.textFieldNode = TextFieldNode() - self.textFieldNode.textField.textColor = .black + self.textFieldNode.textField.textColor = theme.actionSheet.primaryTextColor self.textFieldNode.textField.font = Font.regular(12.0) self.textFieldNode.textField.typingAttributes = [NSAttributedStringKey.font.rawValue: Font.regular(12.0)] self.textFieldNode.textField.isSecureTextEntry = true @@ -297,7 +297,7 @@ private final class BotCheckoutPasswordAlertContentNode: AlertContentNode { func botCheckoutPasswordEntryController(account: Account, strings: PresentationStrings, cartTitle: String, period: Int32, requiresBiometrics: Bool, 
completion: @escaping (TemporaryTwoStepPasswordToken) -> Void) -> AlertController { var dismissImpl: (() -> Void)? let presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - let controller = AlertController(theme: AlertControllerTheme(presentationTheme: presentationData.theme), contentNode: BotCheckoutPasswordAlertContentNode(account: account, strings: strings, cardTitle: cartTitle, period: period, requiresBiometrics: requiresBiometrics, cancel: { + let controller = AlertController(theme: AlertControllerTheme(presentationTheme: presentationData.theme), contentNode: BotCheckoutPasswordAlertContentNode(account: account, theme: presentationData.theme, strings: strings, cardTitle: cartTitle, period: period, requiresBiometrics: requiresBiometrics, cancel: { dismissImpl?() }, completion: { token in completion(token) diff --git a/TelegramUI/BotCheckoutWebInteractionController.swift b/TelegramUI/BotCheckoutWebInteractionController.swift index 37b02c77a4..1bd2229bc4 100644 --- a/TelegramUI/BotCheckoutWebInteractionController.swift +++ b/TelegramUI/BotCheckoutWebInteractionController.swift @@ -30,7 +30,7 @@ final class BotCheckoutWebInteractionController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: (account.telegramApplicationContext.currentPresentationData.with { $0 }).theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: (account.telegramApplicationContext.currentPresentationData.with { $0 }))) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/BotReceiptController.swift b/TelegramUI/BotReceiptController.swift index 5445be7f70..72fc10b7d0 100644 --- a/TelegramUI/BotReceiptController.swift +++ b/TelegramUI/BotReceiptController.swift @@ -30,7 +30,7 @@ final class BotReceiptController: 
ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/CallController.swift b/TelegramUI/CallController.swift index dc3e28d848..70b24cbf8d 100644 --- a/TelegramUI/CallController.swift +++ b/TelegramUI/CallController.swift @@ -38,7 +38,7 @@ public final class CallController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .White self.statusBar.ignoreInCall = true diff --git a/TelegramUI/CallListController.swift b/TelegramUI/CallListController.swift index 949068d375..52503b6d53 100644 --- a/TelegramUI/CallListController.swift +++ b/TelegramUI/CallListController.swift @@ -40,7 +40,7 @@ public final class CallListController: ViewController { self.segmentedTitleView = ItemListControllerSegmentedTitleView(segments: [self.presentationData.strings.Calls_All, self.presentationData.strings.Calls_Missed], index: 0, color: self.presentationData.theme.rootController.navigationBar.accentTextColor) - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -115,7 +115,7 @@ public final class CallListController: ViewController { } self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - 
self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) if self.isNodeLoaded { self.controllerNode.updateThemeAndStrings(theme: self.presentationData.theme, strings: self.presentationData.strings) diff --git a/TelegramUI/ChangePhoneNumberController.swift b/TelegramUI/ChangePhoneNumberController.swift index c1e216c25b..7afe172223 100644 --- a/TelegramUI/ChangePhoneNumberController.swift +++ b/TelegramUI/ChangePhoneNumberController.swift @@ -35,7 +35,7 @@ final class ChangePhoneNumberController: ViewController { self.account = account self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/ChangePhoneNumberIntroController.swift b/TelegramUI/ChangePhoneNumberIntroController.swift index bbec7d0f9f..abd16ab139 100644 --- a/TelegramUI/ChangePhoneNumberIntroController.swift +++ b/TelegramUI/ChangePhoneNumberIntroController.swift @@ -86,7 +86,7 @@ final class ChangePhoneNumberIntroController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/ChannelMembersSearchController.swift b/TelegramUI/ChannelMembersSearchController.swift index 
926903cede..b09fef7d53 100644 --- a/TelegramUI/ChannelMembersSearchController.swift +++ b/TelegramUI/ChannelMembersSearchController.swift @@ -26,7 +26,7 @@ final class ChannelMembersSearchController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/ChatBotStartInputPanelNode.swift b/TelegramUI/ChatBotStartInputPanelNode.swift index 375b886efc..3cc128485f 100644 --- a/TelegramUI/ChatBotStartInputPanelNode.swift +++ b/TelegramUI/ChatBotStartInputPanelNode.swift @@ -86,7 +86,7 @@ final class ChatBotStartInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.sendBotStart(presentationInterfaceState.botStartPayload) } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { let previousState = self.presentationInterfaceState self.presentationInterfaceState = interfaceState @@ -104,7 +104,7 @@ final class ChatBotStartInputPanelNode: ChatInputPanelNode { return 47.0 } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git 
a/TelegramUI/ChatChannelSubscriberInputPanelNode.swift b/TelegramUI/ChatChannelSubscriberInputPanelNode.swift index 4bab37cc9a..6b5caee32a 100644 --- a/TelegramUI/ChatChannelSubscriberInputPanelNode.swift +++ b/TelegramUI/ChatChannelSubscriberInputPanelNode.swift @@ -108,7 +108,7 @@ final class ChatChannelSubscriberInputPanelNode: ChatInputPanelNode { } } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { self.layoutData = (width, leftInset, rightInset) if self.presentationInterfaceState != interfaceState { @@ -140,7 +140,7 @@ final class ChatChannelSubscriberInputPanelNode: ChatInputPanelNode { return 47.0 } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/ChatController.swift b/TelegramUI/ChatController.swift index 4ccc0da920..e729e3ba74 100644 --- a/TelegramUI/ChatController.swift +++ b/TelegramUI/ChatController.swift @@ -6,6 +6,7 @@ import Display import AsyncDisplayKit import TelegramCore import SafariServices +import MobileCoreServices public enum ChatControllerPeekActions { case standard @@ -66,7 +67,7 @@ public enum NavigateToMessageLocation { } } -public final class ChatController: TelegramController, UIViewControllerPreviewingDelegate { +public final class ChatController: TelegramController, UIViewControllerPreviewingDelegate, UIDropInteractionDelegate { private var validLayout: ContainerViewLayout? 
public var peekActions: ChatControllerPeekActions = .standard @@ -181,6 +182,8 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin private weak var silentPostTooltipController: TooltipController? private weak var mediaRecordingModeTooltipController: TooltipController? + private var screenCaptureEventsDisposable: Disposable? + public init(account: Account, chatLocation: ChatLocation, messageId: MessageId? = nil, botStart: ChatControllerInitialBotStart? = nil, mode: ChatControllerPresentationMode = .standard(previewing: false)) { self.account = account self.chatLocation = chatLocation @@ -208,7 +211,7 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin if case .overlay = mode { enableMediaAccessoryPanel = false } - super.init(account: account, navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme), enableMediaAccessoryPanel: enableMediaAccessoryPanel, locationBroadcastPanelSource: locationBroadcastPanelSource) + super.init(account: account, navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData), enableMediaAccessoryPanel: enableMediaAccessoryPanel, locationBroadcastPanelSource: locationBroadcastPanelSource) self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) @@ -1050,6 +1053,14 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin strongSelf.chatTitleView?.networkState = state } }) + + if case let .peer(peerId) = self.chatLocation, peerId.namespace == Namespaces.Peer.SecretChat { + self.screenCaptureEventsDisposable = screenCaptureEvents().start(next: { [weak self] _ in + if let strongSelf = self, strongSelf.canReadHistoryValue { + let _ = addSecretChatMessageScreenshot(account: account, peerId: peerId).start() + } + }) + } } required public init(coder aDecoder: NSCoder) { @@ -1092,6 +1103,7 @@ public 
final class ChatController: TelegramController, UIViewControllerPreviewin self.applicationInForegroundDisposable?.dispose() self.canReadHistoryDisposable?.dispose() self.networkStateDisposable?.dispose() + self.screenCaptureEventsDisposable?.dispose() } public func updatePresentationMode(_ mode: ChatControllerPresentationMode) { @@ -1109,7 +1121,7 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin private func themeAndStringsUpdated() { self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) } override public func loadDisplayNode() { @@ -2385,6 +2397,10 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin return false } + if case .media(_, true) = strongSelf.presentationInterfaceState.inputMode { + return false + } + if !strongSelf.account.telegramApplicationContext.currentMediaInputSettings.with { $0.enableRaiseToSpeak } { return false } @@ -2422,6 +2438,11 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin } self.registerForPreviewing(with: self, sourceView: self.chatDisplayNode.historyNodeContainer.view, theme: PeekControllerTheme(presentationTheme: self.presentationData.theme), onlyNative: true) } + + if #available(iOSApplicationExtension 11.0, *) { + let dropInteraction = UIDropInteraction(delegate: self) + self.chatDisplayNode.view.addInteraction(dropInteraction) + } } if !self.checkedPeerChatServiceActions { @@ -3644,13 +3665,27 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin if let messageId = messageId { 
strongSelf.navigateToMessage(from: nil, to: .id(messageId)) } - } else { - (strongSelf.navigationController as? NavigationController)?.pushViewController(ChatController(account: strongSelf.account, chatLocation: .peer(peerId), messageId: messageId)) + } else if let navigationController = strongSelf.navigationController as? NavigationController { + navigateToChatController(navigationController: navigationController, account: strongSelf.account, chatLocation: .peer(peerId), messageId: messageId) } case .info: - break + strongSelf.navigationActionDisposable.set((strongSelf.account.postbox.loadedPeerWithId(peerId) + |> take(1) + |> deliverOnMainQueue).start(next: { [weak self] peer in + if let strongSelf = self, peer.restrictionText == nil { + if let infoController = peerInfoController(account: strongSelf.account, peer: peer) { + (strongSelf.navigationController as? NavigationController)?.pushViewController(infoController) + } + } + })) case let .withBotStartPayload(startPayload): - break + if case .peer(peerId) = strongSelf.chatLocation { + strongSelf.updateChatPresentationInterfaceState(animated: true, interactive: true, { + $0.updatedBotStartPayload(startPayload.payload) + }) + } else if let navigationController = strongSelf.navigationController as? 
NavigationController { + navigateToChatController(navigationController: navigationController, account: strongSelf.account, chatLocation: .peer(peerId), botStart: startPayload) + } } } }, present: { c, a in @@ -4016,4 +4051,44 @@ public final class ChatController: TelegramController, UIViewControllerPreviewin self.chatDisplayNode.dismissInput() self.present(actionSheet, in: .window(.root)) } + + @available(iOSApplicationExtension 11.0, *) + public func dropInteraction(_ interaction: UIDropInteraction, canHandle session: UIDropSession) -> Bool { + return session.hasItemsConforming(toTypeIdentifiers: [kUTTypeImage as String]) + } + + @available(iOSApplicationExtension 11.0, *) + public func dropInteraction(_ interaction: UIDropInteraction, sessionDidUpdate session: UIDropSession) -> UIDropProposal { + if !canSendMessagesToChat(self.presentationInterfaceState) { + return UIDropProposal(operation: .cancel) + } + + let dropLocation = session.location(in: self.chatDisplayNode.view) + self.chatDisplayNode.updateDropInteraction(isActive: true) + + let operation: UIDropOperation + operation = .copy + return UIDropProposal(operation: operation) + } + + @available(iOSApplicationExtension 11.0, *) + public func dropInteraction(_ interaction: UIDropInteraction, performDrop session: UIDropSession) { + session.loadObjects(ofClass: UIImage.self) { imageItems in + let images = imageItems as! 
[UIImage] + + self.chatDisplayNode.updateDropInteraction(isActive: false) + + self.chatDisplayNode.displayPasteMenu(images) + } + } + + @available(iOSApplicationExtension 11.0, *) + public func dropInteraction(_ interaction: UIDropInteraction, sessionDidExit session: UIDropSession) { + self.chatDisplayNode.updateDropInteraction(isActive: false) + } + + @available(iOSApplicationExtension 11.0, *) + public func dropInteraction(_ interaction: UIDropInteraction, sessionDidEnd session: UIDropSession) { + self.chatDisplayNode.updateDropInteraction(isActive: false) + } } diff --git a/TelegramUI/ChatControllerNode.swift b/TelegramUI/ChatControllerNode.swift index 79942bf6b1..756d981f3d 100644 --- a/TelegramUI/ChatControllerNode.swift +++ b/TelegramUI/ChatControllerNode.swift @@ -21,6 +21,16 @@ private final class ChatControllerNodeView: UITracingLayerView, WindowInputAcces } } +private final class ScrollContainerNode: ASScrollNode { + override func hitTest(_ point: CGPoint, with event: UIEvent?) -> UIView? { + if super.hitTest(point, with: event) == self.view { + return nil + } + + return super.hitTest(point, with: event) + } +} + class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { let account: Account let chatLocation: ChatLocation @@ -30,7 +40,7 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { private var backgroundEffectNode: ASDisplayNode? private var containerBackgroundNode: ASImageNode? - private var scrollContainerNode: ASScrollNode? + private var scrollContainerNode: ScrollContainerNode? private var containerNode: ASDisplayNode? private var overlayNavigationBar: ChatOverlayNavigationBar? @@ -112,6 +122,8 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { private var expandedInputDimNode: ASDisplayNode? + private var dropDimNode: ASDisplayNode? + private var containerLayoutAndNavigationBarHeight: (ContainerViewLayout, CGFloat)? 
private var scheduledLayoutTransitionRequestId: Int = 0 @@ -325,20 +337,14 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { if case .overlay = self.chatPresentationInterfaceState.mode { if self.backgroundEffectNode == nil { let backgroundEffectNode = ASDisplayNode() - switch self.chatPresentationInterfaceState.theme.inAppNotification.expandedNotification.backgroundType { - case .light: - backgroundEffectNode.backgroundColor = UIColor(white: 1.0, alpha: 0.8) - case .dark: - backgroundEffectNode.backgroundColor = UIColor(white: 0.0, alpha: 0.8) - } + backgroundEffectNode.backgroundColor = self.chatPresentationInterfaceState.theme.chatList.backgroundColor.withAlphaComponent(0.8) self.insertSubnode(backgroundEffectNode, at: 0) self.backgroundEffectNode = backgroundEffectNode + backgroundEffectNode.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.backgroundEffectTap(_:)))) } if self.scrollContainerNode == nil { - let scrollContainerNode = ASScrollNode() + let scrollContainerNode = ScrollContainerNode() scrollContainerNode.view.delaysContentTouches = false - //scrollContainerNode.view.canCancelContentTouches = false - //scrollContainerNode.view.panGestureRecognizer.cancelsTouchesInView = false scrollContainerNode.view.delegate = self scrollContainerNode.view.alwaysBounceVertical = true if #available(iOSApplicationExtension 11.0, *) { @@ -469,7 +475,7 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { var inputPanelNodeBaseHeight: CGFloat = 0.0 if let inputPanelNode = self.inputPanelNode { - inputPanelNodeBaseHeight = inputPanelNode.minimalHeight(interfaceState: self.chatPresentationInterfaceState) + inputPanelNodeBaseHeight = inputPanelNode.minimalHeight(interfaceState: self.chatPresentationInterfaceState, metrics: layout.metrics) } let maximumInputNodeHeight = layout.size.height - max(navigationBarHeight, layout.safeInsets.top) - inputPanelNodeBaseHeight @@ -519,8 +525,10 @@ class ChatControllerNode: 
ASDisplayNode, UIScrollViewDelegate { var wrappingInsets = UIEdgeInsets() if case .overlay = self.chatPresentationInterfaceState.mode { - wrappingInsets.left = 8.0 + layout.safeInsets.left - wrappingInsets.right = 8.0 + layout.safeInsets.right + let containerWidth = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: 8.0 + layout.safeInsets.left) + wrappingInsets.left = floor((layout.size.width - containerWidth) / 2.0) + wrappingInsets.right = wrappingInsets.left + wrappingInsets.top = 8.0 if let statusBarHeight = layout.statusBarHeight, CGFloat(40.0).isLess(than: statusBarHeight) { wrappingInsets.top += statusBarHeight @@ -541,12 +549,12 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { } dismissedInputPanelNode = self.inputPanelNode immediatelyLayoutInputPanelAndAnimateAppearance = true - let inputPanelHeight = inputPanelNode.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: layout.size.height - insets.top - insets.bottom, transition: .immediate, interfaceState: self.chatPresentationInterfaceState) + let inputPanelHeight = inputPanelNode.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: layout.size.height - insets.top - insets.bottom, transition: .immediate, interfaceState: self.chatPresentationInterfaceState, metrics: layout.metrics) inputPanelSize = CGSize(width: layout.size.width, height: inputPanelHeight) self.inputPanelNode = inputPanelNode self.insertSubnode(inputPanelNode, aboveSubnode: self.inputPanelBackgroundNode) } else { - let inputPanelHeight = inputPanelNode.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: layout.size.height - insets.top - insets.bottom, transition: transition, interfaceState: self.chatPresentationInterfaceState) + let inputPanelHeight = inputPanelNode.updateLayout(width: 
layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: layout.size.height - insets.top - insets.bottom, transition: transition, interfaceState: self.chatPresentationInterfaceState, metrics: layout.metrics) inputPanelSize = CGSize(width: layout.size.width, height: inputPanelHeight) } } else { @@ -751,7 +759,7 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { transition.updateFrame(node: containerNode, frame: containerNodeFrame) if let containerBackgroundNode = self.containerBackgroundNode { - transition.updateFrame(node: containerBackgroundNode, frame: CGRect(origin: CGPoint(x: containerNodeFrame.minX - 8.0, y: containerNodeFrame.minY - 8.0), size: CGSize(width: containerNodeFrame.size.width + 8.0 * 2.0, height: containerNodeFrame.size.height + 8.0 + 20.0))) + transition.updateFrame(node: containerBackgroundNode, frame: CGRect(origin: CGPoint(x: containerNodeFrame.minX - 8.0 * 2.0, y: containerNodeFrame.minY - 8.0 * 2.0), size: CGSize(width: containerNodeFrame.size.width + 8.0 * 4.0, height: containerNodeFrame.size.height + 8.0 * 2.0 + 20.0))) } } @@ -765,6 +773,13 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { if case .standard = self.chatPresentationInterfaceState.mode { listInsets.left += layout.safeInsets.left listInsets.right += layout.safeInsets.right + + if case .regular = layout.metrics.widthClass, case .regular = layout.metrics.heightClass { + listInsets.left += 6.0 + listInsets.right += 6.0 + listInsets.top += 6.0 + listInsets.bottom += 6.0 + } } var displayTopDimNode = false @@ -773,7 +788,9 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { if let (controller, _) = self.messageActionSheetController { displayTopDimNode = true - let menuHeight = controller.controllerNode.updateLayout(layout: layout, transition: transition) + let globalSelfOrigin = self.view.convert(CGPoint(), to: nil) + + let menuHeight = controller.controllerNode.updateLayout(layout: 
layout, horizontalOrigin: globalSelfOrigin.x, transition: transition) ensureTopInsetForOverlayHighlightedItems = menuHeight let bottomInset = containerInsets.bottom + inputPanelsHeight + UIScreenPixel @@ -849,12 +866,12 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { } transition.updateAlpha(node: expandedInputDimNode, alpha: 1.0) expandedInputDimNode.frame = exandedFrame - transition.animatePositionAdditive(node: expandedInputDimNode, offset: previousInputPanelOrigin.y - inputPanelOrigin) + transition.animatePositionAdditive(node: expandedInputDimNode, offset: CGPoint(x: 0.0, y: previousInputPanelOrigin.y - inputPanelOrigin)) } } else { if let expandedInputDimNode = self.expandedInputDimNode { self.expandedInputDimNode = nil - transition.animatePositionAdditive(node: expandedInputDimNode, offset: previousInputPanelOrigin.y - inputPanelOrigin) + transition.animatePositionAdditive(node: expandedInputDimNode, offset: CGPoint(x: 0.0, y: previousInputPanelOrigin.y - inputPanelOrigin)) transition.updateAlpha(node: expandedInputDimNode, alpha: 0.0, completion: { [weak expandedInputDimNode] _ in expandedInputDimNode?.removeFromSupernode() }) @@ -1357,8 +1374,8 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { if let containerNode = self.containerNode { containerNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) transition.animateFrame(node: containerNode, from: fromFrame) - transition.animatePositionAdditive(node: self.backgroundNode, offset: -containerNode.bounds.size.height) - transition.animatePositionAdditive(node: self.historyNodeContainer, offset: -containerNode.bounds.size.height) + transition.animatePositionAdditive(node: self.backgroundNode, offset: CGPoint(x: 0.0, y: -containerNode.bounds.size.height)) + transition.animatePositionAdditive(node: self.historyNodeContainer, offset: CGPoint(x: 0.0, y: -containerNode.bounds.size.height)) transition.updateFrame(node: fromNode, frame: CGRect(origin: 
containerNode.frame.origin, size: fromNode.frame.size)) } @@ -1484,13 +1501,24 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { } if let (layout, navigationBarHeight) = self.validLayout { - let menuHeight = self.messageActionSheetController?.0.controllerNode.updateLayout(layout: layout, transition: .immediate) + let globalSelfOrigin = self.view.convert(CGPoint(), to: nil) + let menuHeight = self.messageActionSheetController?.0.controllerNode.updateLayout(layout: layout, horizontalOrigin: globalSelfOrigin.x, transition: .immediate) if let stableId = self.messageActionSheetController?.1 { var resultItemNode: ListViewItemNode? self.historyNode.forEachItemNode { itemNode in - if let itemNode = itemNode as? ChatMessageItemView { - if itemNode.item?.message.stableId == stableId { - resultItemNode = itemNode + if let itemNode = itemNode as? ChatMessageItemView, let item = itemNode.item { + switch item.content { + case let .message(message, _, _): + if message.stableId == stableId { + resultItemNode = itemNode + } + case let .group(messages): + for (message, _, _) in messages { + if message.stableId == stableId { + resultItemNode = itemNode + break + } + } } } } @@ -1499,10 +1527,8 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { if resultItemNode.frame.minY < menuHeight { messageActionSheetControllerAdditionalInset = menuHeight - resultItemNode.frame.minY } - //self.historyNode.insets.top } } - //messageActionSheetControllerAdditionalInset } self.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition, listViewTransaction: { updateSizeAndInsets, additionalScrollDistance, scrollToTop in self.historyNode.updateLayout(transition: transition, updateSizeAndInsets: updateSizeAndInsets, additionalScrollDistance: additionalScrollDistance, scrollToTop: scrollToTop) @@ -1561,4 +1587,30 @@ class ChatControllerNode: ASDisplayNode, UIScrollViewDelegate { self.historyNode.scrollScreenToTop() } } + + @objc func 
backgroundEffectTap(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + self.dismissAsOverlay() + } + } + + func updateDropInteraction(isActive: Bool) { + if isActive { + if self.dropDimNode == nil { + let dropDimNode = ASDisplayNode() + dropDimNode.backgroundColor = self.chatPresentationInterfaceState.theme.chatList.backgroundColor.withAlphaComponent(0.35) + self.dropDimNode = dropDimNode + self.addSubnode(dropDimNode) + if let (layout, _) = self.validLayout { + dropDimNode.frame = CGRect(origin: CGPoint(), size: layout.size) + dropDimNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.25) + } + } + } else if let dropDimNode = self.dropDimNode { + self.dropDimNode = nil + dropDimNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.3, removeOnCompletion: false, completion: { [weak dropDimNode] _ in + dropDimNode?.removeFromSupernode() + }) + } + } } diff --git a/TelegramUI/ChatExternalFileGalleryItem.swift b/TelegramUI/ChatExternalFileGalleryItem.swift new file mode 100644 index 0000000000..aee2e13c6a --- /dev/null +++ b/TelegramUI/ChatExternalFileGalleryItem.swift @@ -0,0 +1,329 @@ +import Foundation +import Postbox +import Display +import SwiftSignalKit +import WebKit +import TelegramCore + +class ChatExternalFileGalleryItem: GalleryItem { + let account: Account + let theme: PresentationTheme + let strings: PresentationStrings + let message: Message + let location: MessageHistoryEntryLocation? + + init(account: Account, theme: PresentationTheme, strings: PresentationStrings, message: Message, location: MessageHistoryEntryLocation?) { + self.account = account + self.theme = theme + self.strings = strings + self.message = message + self.location = location + } + + func node() -> GalleryItemNode { + let node = ChatExternalFileGalleryItemNode(account: self.account, theme: self.theme, strings: self.strings) + + for media in self.message.media { + if let file = media as? 
TelegramMediaFile { + node.setFile(account: account, file: file) + break + } else if let webpage = media as? TelegramMediaWebpage, case let .Loaded(content) = webpage.content { + if let file = content.file { + node.setFile(account: account, file: file) + break + } + } + } + + if let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + node.setMessage(self.message) + + return node + } + + func updateNode(node: GalleryItemNode) { + if let node = node as? ChatExternalFileGalleryItemNode, let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + node.setMessage(self.message) + } + } +} + +class ChatExternalFileGalleryItemNode: GalleryItemNode { + fileprivate let _title = Promise() + + private let statusNodeContainer: HighlightableButtonNode + private let statusNode: RadialStatusNode + + private let containerNode: ASDisplayNode + private let fileNameNode: ImmediateTextNode + private let actionTitleNode: ImmediateTextNode + private let actionButtonNode: HighlightableButtonNode + + private var accountAndFile: (Account, TelegramMediaFile)? + private let dataDisposable = MetaDisposable() + + private var itemIsVisible = false + + private var message: Message? + + private let footerContentNode: ChatItemGalleryFooterContentNode + + private var fetchDisposable = MetaDisposable() + private let statusDisposable = MetaDisposable() + private var status: MediaResourceStatus? 
+ + init(account: Account, theme: PresentationTheme, strings: PresentationStrings) { + self.containerNode = ASDisplayNode() + self.containerNode.backgroundColor = .white + + self.fileNameNode = ImmediateTextNode() + self.containerNode.addSubnode(self.fileNameNode) + + self.actionTitleNode = ImmediateTextNode() + self.actionTitleNode.attributedText = NSAttributedString(string: strings.Conversation_LinkDialogOpen, font: Font.regular(17.0), textColor: theme.list.itemAccentColor) + self.containerNode.addSubnode(self.actionTitleNode) + + self.actionButtonNode = HighlightableButtonNode() + self.containerNode.addSubnode(self.actionButtonNode) + + self.footerContentNode = ChatItemGalleryFooterContentNode(account: account, theme: theme, strings: strings) + + self.statusNodeContainer = HighlightableButtonNode() + self.statusNode = RadialStatusNode(backgroundNodeColor: UIColor(white: 0.0, alpha: 0.5)) + self.statusNode.isHidden = true + + super.init() + + self.addSubnode(self.containerNode) + + self.statusNodeContainer.addSubnode(self.statusNode) + self.addSubnode(self.statusNodeContainer) + + self.statusNodeContainer.addTarget(self, action: #selector(self.statusPressed), forControlEvents: .touchUpInside) + + self.statusNodeContainer.isUserInteractionEnabled = false + + self.actionButtonNode.addTarget(self, action: #selector(self.actionButtonPressed), forControlEvents: .touchUpInside) + self.actionButtonNode.highligthedChanged = { [weak self] highlighted in + if let strongSelf = self { + if highlighted { + strongSelf.actionTitleNode.layer.removeAnimation(forKey: "opacity") + strongSelf.actionTitleNode.alpha = 0.4 + } else { + strongSelf.actionTitleNode.alpha = 1.0 + strongSelf.actionTitleNode.layer.animateAlpha(from: 0.4, to: 1.0, duration: 0.2) + } + } + } + } + + deinit { + self.dataDisposable.dispose() + self.fetchDisposable.dispose() + self.statusDisposable.dispose() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, 
transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + + let containerFrame = CGRect(origin: CGPoint(x: 0.0, y: navigationBarHeight), size: CGSize(width: layout.size.width, height: layout.size.height - navigationBarHeight - 44.0 - layout.insets(options: []).bottom)) + self.containerNode.frame = containerFrame + + let fileNameSize = self.fileNameNode.updateLayout(containerFrame.insetBy(dx: 10.0, dy: 0.0).size) + let actionTitleSize = self.actionTitleNode.updateLayout(containerFrame.insetBy(dx: 10.0, dy: 0.0).size) + + let spacing: CGFloat = 4.0 + + let contentHeight: CGFloat = fileNameSize.height + spacing + actionTitleSize.height + + let contentOrigin = floor((containerFrame.size.height - contentHeight) / 2.0) + + let fileNameFrame = CGRect(origin: CGPoint(x: floor((containerFrame.width - fileNameSize.width) / 2.0), y: contentOrigin), size: fileNameSize) + transition.updateFrame(node: self.fileNameNode, frame: fileNameFrame) + + let actionTitleFrame = CGRect(origin: CGPoint(x: floor((containerFrame.width - actionTitleSize.width) / 2.0), y: fileNameFrame.maxY + spacing), size: actionTitleSize) + transition.updateFrame(node: self.actionTitleNode, frame: actionTitleFrame) + transition.updateFrame(node: self.actionButtonNode, frame: actionTitleFrame.insetBy(dx: -8.0, dy: -8.0)) + + let statusSize = CGSize(width: 50.0, height: 50.0) + transition.updateFrame(node: self.statusNodeContainer, frame: CGRect(origin: CGPoint(x: floor((layout.size.width - statusSize.width) / 2.0), y: floor((layout.size.height - statusSize.height) / 2.0)), size: statusSize)) + transition.updateFrame(node: self.statusNode, frame: CGRect(origin: CGPoint(), size: statusSize)) + } + + fileprivate func setMessage(_ message: Message) { + self.message = message + self.footerContentNode.setMessage(message) + } + + override func navigationStyle() -> Signal { + return .single(.dark) + } + + func 
setFile(account: Account, file: TelegramMediaFile) { + let updateFile = self.accountAndFile?.1 != file + self.accountAndFile = (account, file) + if updateFile { + self.fileNameNode.attributedText = NSAttributedString(string: file.fileName ?? " ", font: Font.regular(17.0), textColor: .black) + self.setupStatus(account: account, resource: file.resource) + } + } + + private func setupStatus(account: Account, resource: MediaResource) { + self.statusDisposable.set((account.postbox.mediaBox.resourceStatus(resource) + |> deliverOnMainQueue).start(next: { [weak self] status in + if let strongSelf = self { + let previousStatus = strongSelf.status + strongSelf.status = status + switch status { + case .Remote: + strongSelf.statusNode.isHidden = false + strongSelf.statusNode.alpha = 1.0 + strongSelf.statusNodeContainer.isUserInteractionEnabled = true + strongSelf.statusNode.transitionToState(.download(.white), completion: {}) + case let .Fetching(isActive, progress): + strongSelf.statusNode.isHidden = false + strongSelf.statusNode.alpha = 1.0 + strongSelf.statusNodeContainer.isUserInteractionEnabled = true + var actualProgress = progress + if isActive { + actualProgress = max(actualProgress, 0.027) + } + strongSelf.statusNode.transitionToState(.progress(color: .white, value: CGFloat(actualProgress), cancelEnabled: true), completion: {}) + case .Local: + if let previousStatus = previousStatus, case .Fetching = previousStatus { + strongSelf.statusNode.transitionToState(.progress(color: .white, value: 1.0, cancelEnabled: true), completion: { + if let strongSelf = self { + strongSelf.statusNode.alpha = 0.0 + strongSelf.statusNodeContainer.isUserInteractionEnabled = false + strongSelf.statusNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.2, completion: { _ in + if let strongSelf = self { + strongSelf.statusNode.transitionToState(.none, animated: false, completion: {}) + } + }) + } + }) + } else if !strongSelf.statusNode.isHidden && !strongSelf.statusNode.alpha.isZero { + 
strongSelf.statusNode.alpha = 0.0 + strongSelf.statusNodeContainer.isUserInteractionEnabled = false + strongSelf.statusNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.2, completion: { _ in + if let strongSelf = self { + strongSelf.statusNode.transitionToState(.none, animated: false, completion: {}) + } + }) + } + } + } + })) + } + + override func visibilityUpdated(isVisible: Bool) { + super.visibilityUpdated(isVisible: isVisible) + + if self.itemIsVisible != isVisible { + self.itemIsVisible = isVisible + + if isVisible { + } else { + self.fetchDisposable.set(nil) + } + } + } + + override func title() -> Signal { + return self._title.get() + } + + override func animateIn(from node: (ASDisplayNode, () -> UIView?), addToTransitionSurface: (UIView) -> Void) { + var transformedFrame = node.0.view.convert(node.0.view.bounds, to: self.containerNode.view) + let transformedSuperFrame = node.0.view.convert(node.0.view.bounds, to: self.containerNode.view.superview) + + self.containerNode.layer.animatePosition(from: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), to: self.containerNode.layer.position, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring) + + transformedFrame.origin = CGPoint() + + let transform = CATransform3DScale(self.containerNode.layer.transform, transformedFrame.size.width / self.containerNode.layer.bounds.size.width, transformedFrame.size.height / self.containerNode.layer.bounds.size.height, 1.0) + self.containerNode.layer.animate(from: NSValue(caTransform3D: transform), to: NSValue(caTransform3D: self.containerNode.layer.transform), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25) + + self.statusNodeContainer.layer.animatePosition(from: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), to: self.statusNodeContainer.position, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring) + self.statusNodeContainer.layer.animateAlpha(from: 0.0, to: 1.0, duration: 
0.25, timingFunction: kCAMediaTimingFunctionSpring) + self.statusNodeContainer.layer.animateScale(from: 0.5, to: 1.0, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring) + } + + override func animateOut(to node: (ASDisplayNode, () -> UIView?), addToTransitionSurface: (UIView) -> Void, completion: @escaping () -> Void) { + var transformedFrame = node.0.view.convert(node.0.view.bounds, to: self.containerNode.view) + let transformedSuperFrame = node.0.view.convert(node.0.view.bounds, to: self.containerNode.view.superview) + let transformedSelfFrame = node.0.view.convert(node.0.view.bounds, to: self.view) + let transformedCopyViewInitialFrame = self.containerNode.view.convert(self.containerNode.view.bounds, to: self.view) + + var positionCompleted = false + var boundsCompleted = false + var copyCompleted = false + + let copyView = node.1()! + + self.view.insertSubview(copyView, belowSubview: self.containerNode.view) + copyView.frame = transformedSelfFrame + + let intermediateCompletion = { [weak copyView] in + if positionCompleted && boundsCompleted && copyCompleted { + copyView?.removeFromSuperview() + completion() + } + } + + copyView.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.1, removeOnCompletion: false) + + copyView.layer.animatePosition(from: CGPoint(x: transformedCopyViewInitialFrame.midX, y: transformedCopyViewInitialFrame.midY), to: CGPoint(x: transformedSelfFrame.midX, y: transformedSelfFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false) + let scale = CGSize(width: transformedCopyViewInitialFrame.size.width / transformedSelfFrame.size.width, height: transformedCopyViewInitialFrame.size.height / transformedSelfFrame.size.height) + copyView.layer.animate(from: NSValue(caTransform3D: CATransform3DMakeScale(scale.width, scale.height, 1.0)), to: NSValue(caTransform3D: CATransform3DIdentity), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25, removeOnCompletion: 
false, completion: { _ in + copyCompleted = true + intermediateCompletion() + }) + + self.containerNode.layer.animatePosition(from: self.containerNode.layer.position, to: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false, completion: { _ in + positionCompleted = true + intermediateCompletion() + }) + + self.containerNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.25, removeOnCompletion: false) + + transformedFrame.origin = CGPoint() + + let transform = CATransform3DScale(self.containerNode.layer.transform, transformedFrame.size.width / self.containerNode.layer.bounds.size.width, transformedFrame.size.height / self.containerNode.layer.bounds.size.height, 1.0) + self.containerNode.layer.animate(from: NSValue(caTransform3D: self.containerNode.layer.transform), to: NSValue(caTransform3D: transform), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25, removeOnCompletion: false, completion: { _ in + boundsCompleted = true + intermediateCompletion() + }) + + self.statusNodeContainer.layer.animatePosition(from: self.statusNodeContainer.position, to: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false) + self.statusNodeContainer.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.15, timingFunction: kCAMediaTimingFunctionEaseIn, removeOnCompletion: false) + } + + override func footerContent() -> Signal { + return .single(self.footerContentNode) + } + + @objc func statusPressed() { + if let (account, file) = self.accountAndFile, let status = self.status { + switch status { + case .Fetching: + account.postbox.mediaBox.cancelInteractiveResourceFetch(file.resource) + case .Remote: + self.fetchDisposable.set(account.postbox.mediaBox.fetchedResource(file.resource, tag: TelegramMediaResourceFetchTag(statsCategory: 
.generic)).start()) + default: + break + } + } + } + + @objc func actionButtonPressed() { + if let (account, _) = self.accountAndFile, let message = self.message, let status = self.status, case .Local = status { + let baseNavigationController = self.baseNavigationController() + (baseNavigationController?.topViewController as? ViewController)?.present(ShareController(account: account, subject: .messages([message]), saveToCameraRoll: false, showInChat: nil, externalShare: true, immediateExternalShare: true), in: .window(.root)) + } + } +} + diff --git a/TelegramUI/ChatFeedNavigationInputPanelNode.swift b/TelegramUI/ChatFeedNavigationInputPanelNode.swift index 4ab968c213..73e1f77433 100644 --- a/TelegramUI/ChatFeedNavigationInputPanelNode.swift +++ b/TelegramUI/ChatFeedNavigationInputPanelNode.swift @@ -51,7 +51,7 @@ final class ChatFeedNavigationInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.navigateFeed() } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { self.presentationInterfaceState = interfaceState } @@ -65,7 +65,7 @@ final class ChatFeedNavigationInputPanelNode: ChatInputPanelNode { return 47.0 } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/ChatHistoryListNode.swift b/TelegramUI/ChatHistoryListNode.swift index aa72a48f81..59a8b29e74 100644 --- a/TelegramUI/ChatHistoryListNode.swift +++ 
b/TelegramUI/ChatHistoryListNode.swift @@ -490,7 +490,7 @@ public final class ChatHistoryListNode: ListView, ChatHistoryNode { if apply { switch chatLocation { case .peer: - let _ = applyMaxReadIndexInteractively(postbox: account.postbox, network: account.network, stateManager: account.stateManager, index: messageIndex).start() + let _ = applyMaxReadIndexInteractively(postbox: account.postbox, stateManager: account.stateManager, index: messageIndex).start() case let .group(groupId): let _ = account.postbox.modify({ modifier -> Void in modifier.applyGroupFeedInteractiveReadMaxIndex(groupId: groupId, index: messageIndex) @@ -974,7 +974,7 @@ public final class ChatHistoryListNode: ListView, ChatHistoryNode { } } else if self.interactiveReadActionDisposable == nil { if case let .peer(peerId) = self.chatLocation { - self.interactiveReadActionDisposable = installInteractiveReadMessagesAction(postbox: self.account.postbox, peerId: peerId) + self.interactiveReadActionDisposable = installInteractiveReadMessagesAction(postbox: self.account.postbox, stateManager: self.account.stateManager, peerId: peerId) } } } diff --git a/TelegramUI/ChatHistorySearchContainerNode.swift b/TelegramUI/ChatHistorySearchContainerNode.swift index b7b3b9bf5a..1372c3b3a9 100644 --- a/TelegramUI/ChatHistorySearchContainerNode.swift +++ b/TelegramUI/ChatHistorySearchContainerNode.swift @@ -108,8 +108,14 @@ final class ChatHistorySearchContainerNode: SearchDisplayControllerContentNode { private var currentEntries: [ChatHistorySearchEntry]? 
private let searchQuery = Promise() + private let searchQueryDisposable = MetaDisposable() private let searchDisposable = MetaDisposable() + private let _isSearching = ValuePromise(false, ignoreRepeated: true) + override var isSearching: Signal { + return self._isSearching.get() + } + private var presentationData: PresentationData private let themeAndStringsPromise: Promise<(PresentationTheme, PresentationStrings)> @@ -138,13 +144,17 @@ final class ChatHistorySearchContainerNode: SearchDisplayControllerContentNode { let themeAndStringsPromise = self.themeAndStringsPromise - let searchItems = searchQuery.get() - |> mapToSignal { query -> Signal<[ChatHistorySearchEntry]?, NoError> in + let previousEntriesValue = Atomic<[ChatHistorySearchEntry]?>(value: nil) + + self.searchQueryDisposable.set((searchQuery.get() + |> deliverOnMainQueue).start(next: { [weak self] query in + if let strongSelf = self { + let signal: Signal<[ChatHistorySearchEntry]?, NoError> if let query = query, !query.isEmpty { let foundRemoteMessages: Signal<[Message], NoError> = searchMessages(account: account, location: .peer(peerId: peerId, fromId: nil, tags: tagMask), query: query) |> delay(0.2, queue: Queue.concurrentDefaultQueue()) - return combineLatest(foundRemoteMessages, themeAndStringsPromise.get()) + signal = combineLatest(foundRemoteMessages, themeAndStringsPromise.get()) |> map { messages, themeAndStrings -> [ChatHistorySearchEntry]? 
in if messages.isEmpty { return nil @@ -153,25 +163,28 @@ final class ChatHistorySearchContainerNode: SearchDisplayControllerContentNode { return .message(message, themeAndStrings.0, themeAndStrings.1) } } - } - } else { - return .single(nil) - } - } - - let previousEntriesValue = Atomic<[ChatHistorySearchEntry]?>(value: nil) - - self.searchDisposable.set((searchItems - |> deliverOnMainQueue).start(next: { [weak self] entries in - if let strongSelf = self { - let previousEntries = previousEntriesValue.swap(entries) + } - let firstTime = previousEntries == nil - let transition = chatHistorySearchContainerPreparedTransition(from: previousEntries ?? [], to: entries ?? [], displayingResults: entries != nil, account: account, peerId: peerId, interaction: interfaceInteraction) - strongSelf.currentEntries = entries - strongSelf.enqueueTransition(transition, firstTime: firstTime) + strongSelf._isSearching.set(true) + } else { + signal = .single(nil) + strongSelf._isSearching.set(false) } - })) + + strongSelf.searchDisposable.set((signal + |> deliverOnMainQueue).start(next: { entries in + if let strongSelf = self { + let previousEntries = previousEntriesValue.swap(entries) + + let firstTime = previousEntries == nil + let transition = chatHistorySearchContainerPreparedTransition(from: previousEntries ?? [], to: entries ?? 
[], displayingResults: entries != nil, account: account, peerId: peerId, interaction: interfaceInteraction) + strongSelf.currentEntries = entries + strongSelf.enqueueTransition(transition, firstTime: firstTime) + strongSelf._isSearching.set(false) + } + })) + } + })) self.listNode.beganInteractiveDragging = { [weak self] in self?.dismissInput?() @@ -179,6 +192,7 @@ final class ChatHistorySearchContainerNode: SearchDisplayControllerContentNode { } deinit { + self.searchQueryDisposable.dispose() self.searchDisposable.dispose() } diff --git a/TelegramUI/ChatHoleGalleryItem.swift b/TelegramUI/ChatHoleGalleryItem.swift deleted file mode 100644 index 867fdf45e5..0000000000 --- a/TelegramUI/ChatHoleGalleryItem.swift +++ /dev/null @@ -1,21 +0,0 @@ -import Foundation -import Display -import AsyncDisplayKit - -final class ChatHoleGalleryItem: GalleryItem { - func node() -> GalleryItemNode { - return ChatHoleGalleryItemNode() - } - - func updateNode(node: GalleryItemNode) { - - } -} - -final class ChatHoleGalleryItemNode: GalleryItemNode { - override init() { - super.init() - - self.backgroundColor = UIColor.blue - } -} diff --git a/TelegramUI/ChatInputPanelNode.swift b/TelegramUI/ChatInputPanelNode.swift index ddf091229c..41c0f1cac1 100644 --- a/TelegramUI/ChatInputPanelNode.swift +++ b/TelegramUI/ChatInputPanelNode.swift @@ -8,11 +8,11 @@ class ChatInputPanelNode: ASDisplayNode { var account: Account? var interfaceInteraction: ChatPanelInterfaceInteraction? 
- func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 0.0 } - func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 0.0 } } diff --git a/TelegramUI/ChatInterfaceStateContextMenus.swift b/TelegramUI/ChatInterfaceStateContextMenus.swift index 1edf289a35..3b887023ce 100644 --- a/TelegramUI/ChatInterfaceStateContextMenus.swift +++ b/TelegramUI/ChatInterfaceStateContextMenus.swift @@ -274,19 +274,29 @@ func contextMenuForChatPresentationIntefaceState(chatPresentationInterfaceState: if messages.count == 1 { let message = messages[0] - for media in message.media { - if let file = media as? TelegramMediaFile { - if file.isVideo { - if file.isAnimated { - actions.append(.sheet(ChatMessageContextMenuSheetAction(color: .accent, title: chatPresentationInterfaceState.strings.Conversation_LinkDialogSave, action: { - let _ = addSavedGif(postbox: account.postbox, file: file).start() - }))) - } else if !GlobalExperimentalSettings.isAppStoreBuild { - actions.append(.sheet(ChatMessageContextMenuSheetAction(color: .accent, title: "Stream", action: { - debugStreamSingleVideo(message.id) - }))) + var hasAutoremove = false + for attribute in message.attributes { + if let _ = attribute as? AutoremoveTimeoutMessageAttribute { + hasAutoremove = true + break + } + } + + if !hasAutoremove { + for media in message.media { + if let file = media as? 
TelegramMediaFile { + if file.isVideo { + if file.isAnimated { + actions.append(.sheet(ChatMessageContextMenuSheetAction(color: .accent, title: chatPresentationInterfaceState.strings.Conversation_LinkDialogSave, action: { + let _ = addSavedGif(postbox: account.postbox, file: file).start() + }))) + } else if !GlobalExperimentalSettings.isAppStoreBuild { + actions.append(.sheet(ChatMessageContextMenuSheetAction(color: .accent, title: "Stream", action: { + debugStreamSingleVideo(message.id) + }))) + } + break } - break } } } diff --git a/TelegramUI/ChatItemGalleryFooterContentNode.swift b/TelegramUI/ChatItemGalleryFooterContentNode.swift index 1789195093..832f8ac527 100644 --- a/TelegramUI/ChatItemGalleryFooterContentNode.swift +++ b/TelegramUI/ChatItemGalleryFooterContentNode.swift @@ -214,8 +214,10 @@ final class ChatItemGalleryFooterContentNode: GalleryFooterContentNode { var messageText = "" var hasCaption = false for media in message.media { - if media is TelegramMediaImage || media is TelegramMediaFile { + if media is TelegramMediaImage { hasCaption = true + } else if let file = media as? 
TelegramMediaFile { + hasCaption = file.mimeType.hasPrefix("image/") } } if hasCaption { diff --git a/TelegramUI/ChatListController.swift b/TelegramUI/ChatListController.swift index 646278bb4e..1bbae8f53e 100644 --- a/TelegramUI/ChatListController.swift +++ b/TelegramUI/ChatListController.swift @@ -39,7 +39,7 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD self.titleView = NetworkStatusTitleView(theme: self.presentationData.theme) - super.init(account: account, navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme), enableMediaAccessoryPanel: true, locationBroadcastPanelSource: .summary) + super.init(account: account, navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData), enableMediaAccessoryPanel: true, locationBroadcastPanelSource: .summary) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -170,7 +170,7 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD self.titleView.theme = self.presentationData.theme self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) if self.isNodeLoaded { self.chatListDisplayNode.updateThemeAndStrings(theme: self.presentationData.theme, strings: self.presentationData.strings, timeFormat: self.presentationData.timeFormat) @@ -246,15 +246,19 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD self.chatListDisplayNode.chatListNode.peerSelected = { [weak self] peerId in if let strongSelf = self { - (strongSelf.navigationController as? 
NavigationController)?.pushViewController(ChatController(account: strongSelf.account, chatLocation: .peer(peerId))) - strongSelf.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + if let navigationController = strongSelf.navigationController as? NavigationController { + navigateToChatController(navigationController: navigationController, account: strongSelf.account, chatLocation: .peer(peerId)) + strongSelf.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + } } } self.chatListDisplayNode.chatListNode.groupSelected = { [weak self] groupId in if let strongSelf = self { - (strongSelf.navigationController as? NavigationController)?.pushViewController(ChatController(account: strongSelf.account, chatLocation: .group(groupId))) - strongSelf.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + if let navigationController = strongSelf.navigationController as? NavigationController { + navigateToChatController(navigationController: navigationController, account: strongSelf.account, chatLocation: .group(groupId)) + strongSelf.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + } } } @@ -268,7 +272,10 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD if let strongSelf = self { strongSelf.openMessageFromSearchDisposable.set((storedMessageFromSearchPeer(account: strongSelf.account, peer: peer) |> deliverOnMainQueue).start(completed: { [weak strongSelf] in if let strongSelf = strongSelf { - (strongSelf.navigationController as? NavigationController)?.pushViewController(ChatController(account: strongSelf.account, chatLocation: .peer(messageId.peerId), messageId: messageId)) + if let navigationController = strongSelf.navigationController as? 
NavigationController { + navigateToChatController(navigationController: navigationController, account: strongSelf.account, chatLocation: .peer(messageId.peerId), messageId: messageId) + strongSelf.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + } } })) } @@ -286,7 +293,10 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD strongSelf.openMessageFromSearchDisposable.set((storedPeer |> deliverOnMainQueue).start(completed: { [weak strongSelf] in if let strongSelf = strongSelf { strongSelf.dismissSearchOnDisappear = true - (strongSelf.navigationController as? NavigationController)?.pushViewController(ChatController(account: strongSelf.account, chatLocation: .peer(peer.id))) + if let navigationController = strongSelf.navigationController as? NavigationController { + navigateToChatController(navigationController: navigationController, account: strongSelf.account, chatLocation: .peer(peer.id)) + strongSelf.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + } } })) } @@ -340,6 +350,8 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD self.dismissSearchOnDisappear = false self.deactivateSearch(animated: false) } + + self.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) } override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { @@ -348,6 +360,14 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD self.chatListDisplayNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationHeight, transition: transition) } + override public func navigationStackConfigurationUpdated(next: [ViewController]) { + super.navigationStackConfigurationUpdated(next: next) + + let chatLocation = (next.first as? 
ChatController)?.chatLocation + + self.chatListDisplayNode.chatListNode.updateSelectedChatLocation(chatLocation, progress: 1.0, transition: .immediate) + } + @objc func editPressed() { let editItem = UIBarButtonItem(title: self.presentationData.strings.Common_Done, style: .done, target: self, action: #selector(self.donePressed)) if self.groupId == nil { @@ -390,7 +410,7 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD } @objc func composePressed() { - (self.navigationController as? NavigationController)?.pushViewController(ComposeController(account: self.account)) + (self.navigationController as? NavigationController)?.replaceAllButRootController(ComposeController(account: self.account), animated: true) } public func previewingContext(_ previewingContext: UIViewControllerPreviewing, viewControllerForLocation location: CGPoint) -> UIViewController? { @@ -487,8 +507,11 @@ public class ChatListController: TelegramController, UIViewControllerPreviewingD if let chatController = viewControllerToCommit as? ChatController { chatController.canReadHistory.set(true) chatController.updatePresentationMode(.standard(previewing: false)) + if let navigationController = self.navigationController as? NavigationController { + navigateToChatController(navigationController: navigationController, chatController: chatController, account: self.account, chatLocation: chatController.chatLocation) + self.chatListDisplayNode.chatListNode.clearHighlightAnimated(true) + } } - (self.navigationController as? 
NavigationController)?.pushViewController(viewControllerToCommit, animated: false) } } } diff --git a/TelegramUI/ChatListItem.swift b/TelegramUI/ChatListItem.swift index 1f4911da59..b955d7fe2d 100644 --- a/TelegramUI/ChatListItem.swift +++ b/TelegramUI/ChatListItem.swift @@ -9,6 +9,15 @@ import TelegramCore enum ChatListItemContent { case peer(message: Message?, peer: RenderedPeer, combinedReadState: CombinedPeerReadState?, notificationSettings: PeerNotificationSettings?, summaryInfo: ChatListMessageTagSummaryInfo, embeddedState: PeerChatListEmbeddedInterfaceState?, inputActivities: [(Peer, PeerInputActivity)]?) case groupReference(groupId: PeerGroupId, message: Message?, topPeers: [Peer], counters: GroupReferenceUnreadCounters) + + var chatLocation: ChatLocation { + switch self { + case let .peer(_, peer, _, _, _, _, _): + return .peer(peer.peerId) + case let .groupReference(groupId, _, _, _): + return .group(groupId) + } + } } class ChatListItem: ListViewItem { @@ -54,6 +63,7 @@ class ChatListItem: ListViewItem { completion(node, { return (nil, { apply(false) + node.updateIsHighlighted(transition: .immediate) }) }) } @@ -209,6 +219,8 @@ class ChatListItemNode: ItemListRevealOptionsItemNode { var layoutParams: (ChatListItem, first: Bool, last: Bool, firstWithHeader: Bool, nextIsPinned: Bool, ListViewItemLayoutParams)? 
+ private var isHighlighted: Bool = false + override var canBeSelected: Bool { if self.editableControlNode != nil { return false @@ -331,31 +343,36 @@ class ChatListItemNode: ItemListRevealOptionsItemNode { override func setHighlighted(_ highlighted: Bool, at point: CGPoint, animated: Bool) { super.setHighlighted(highlighted, at: point, animated: animated) - if highlighted { - /*var nodes: [ASDisplayNode] = [self.titleNode, self.textNode, self.dateNode, self.statusNode] - for node in nodes { - node.backgroundColor = .clear - node.recursivelyEnsureDisplaySynchronously(true) - }*/ - - self.highlightedBackgroundNode.alpha = 1.0 + self.isHighlighted = highlighted + + self.updateIsHighlighted(transition: (animated && !highlighted) ? .animated(duration: 0.3, curve: .easeInOut) : .immediate) + } + + func updateIsHighlighted(transition: ContainedViewLayoutTransition) { + var reallyHighlighted = self.isHighlighted + if let item = self.item { + let itemChatLocation = item.content.chatLocation + if itemChatLocation == item.interaction.highlightedChatLocation?.location { + reallyHighlighted = true + } + } + + if reallyHighlighted { if self.highlightedBackgroundNode.supernode == nil { self.insertSubnode(self.highlightedBackgroundNode, aboveSubnode: self.separatorNode) + self.highlightedBackgroundNode.alpha = 0.0 } + self.highlightedBackgroundNode.layer.removeAllAnimations() + transition.updateAlpha(layer: self.highlightedBackgroundNode.layer, alpha: 1.0) } else { if self.highlightedBackgroundNode.supernode != nil { - if animated { - self.highlightedBackgroundNode.layer.animateAlpha(from: self.highlightedBackgroundNode.alpha, to: 0.0, duration: 0.4, completion: { [weak self] completed in - if let strongSelf = self { - if completed { - strongSelf.highlightedBackgroundNode.removeFromSupernode() - } + transition.updateAlpha(layer: self.highlightedBackgroundNode.layer, alpha: 0.0, completion: { [weak self] completed in + if let strongSelf = self { + if completed { + 
strongSelf.highlightedBackgroundNode.removeFromSupernode() } - }) - self.highlightedBackgroundNode.alpha = 0.0 - } else { - self.highlightedBackgroundNode.removeFromSupernode() - } + } + }) } } } @@ -488,11 +505,11 @@ class ChatListItemNode: ItemListRevealOptionsItemNode { if let author = message.author as? TelegramUser, let peer = peer, !(peer is TelegramUser) { if let peer = peer as? TelegramChannel, case .broadcast = peer.info { } else { - peerText = author.id == account.peerId ? item.presentationData.strings.DialogList_You : author.displayTitle + peerText = author.id == account.peerId ? item.presentationData.strings.DialogList_You : author.displayTitle(or: item.presentationData.strings.Peer_DeletedUser) } } else if case .groupReference = item.content { if let messagePeer = itemPeer.chatMainPeer { - peerText = messagePeer.displayTitle + peerText = messagePeer.displayTitle(or: item.presentationData.strings.Peer_DeletedUser) } } @@ -507,7 +524,7 @@ class ChatListItemNode: ItemListRevealOptionsItemNode { case .peer: if peer?.id == item.account.peerId { titleAttributedString = NSAttributedString(string: item.presentationData.strings.DialogList_SavedMessages, font: titleFont, textColor: theme.titleColor) - } else if let displayTitle = peer?.displayTitle { + } else if let displayTitle = peer?.displayTitle(or: item.presentationData.strings.Peer_DeletedUser) { titleAttributedString = NSAttributedString(string: displayTitle, font: titleFont, textColor: item.index.messageIndex.id.peerId.namespace == Namespaces.Peer.SecretChat ? theme.secretTitleColor : theme.titleColor) } case .groupReference: diff --git a/TelegramUI/ChatListNode.swift b/TelegramUI/ChatListNode.swift index 444f8e19d5..198d5b9874 100644 --- a/TelegramUI/ChatListNode.swift +++ b/TelegramUI/ChatListNode.swift @@ -20,6 +20,20 @@ struct ChatListNodeListViewTransition { let stationaryItemRange: (Int, Int)? 
} +final class ChatListHighlightedLocation { + let location: ChatLocation + let progress: CGFloat + + init(location: ChatLocation, progress: CGFloat) { + self.location = location + self.progress = progress + } + + func withUpdatedProgress(_ progress: CGFloat) -> ChatListHighlightedLocation { + return ChatListHighlightedLocation(location: location, progress: progress) + } +} + final class ChatListNodeInteraction { let activateSearch: () -> Void let peerSelected: (Peer) -> Void @@ -31,6 +45,8 @@ final class ChatListNodeInteraction { let deletePeer: (PeerId) -> Void let updatePeerGrouping: (PeerId, Bool) -> Void + var highlightedChatLocation: ChatListHighlightedLocation? + init(activateSearch: @escaping () -> Void, peerSelected: @escaping (Peer) -> Void, messageSelected: @escaping (Message) -> Void, groupSelected: @escaping (PeerGroupId) -> Void, setPeerIdWithRevealedOptions: @escaping (PeerId?, PeerId?) -> Void, setItemPinned: @escaping (PinnedItemId, Bool) -> Void, setPeerMuted: @escaping (PeerId, Bool) -> Void, deletePeer: @escaping (PeerId) -> Void, updatePeerGrouping: @escaping (PeerId, Bool) -> Void) { self.activateSearch = activateSearch self.peerSelected = peerSelected @@ -203,6 +219,7 @@ final class ChatListNode: ListView { private let viewProcessingQueue = Queue() private var chatListView: ChatListNodeView? + private var interaction: ChatListNodeInteraction? private var dequeuedInitialTransitionOnLayout = false private var enqueuedTransition: (ChatListNodeListViewTransition, () -> Void)? 
@@ -369,6 +386,8 @@ final class ChatListNode: ListView { } } + self.interaction = nodeInteraction + self.chatListDisposable.set(appliedTransition.start()) let initialLocation: ChatListNodeLocation = .initial(count: 50) @@ -667,4 +686,22 @@ final class ChatListNode: ListView { private func enqueueHistoryPreloadUpdate() { } + + func updateSelectedChatLocation(_ chatLocation: ChatLocation?, progress: CGFloat, transition: ContainedViewLayoutTransition) { + guard let interaction = self.interaction else { + return + } + + if let chatLocation = chatLocation { + interaction.highlightedChatLocation = ChatListHighlightedLocation(location: chatLocation, progress: 1.0) + } else { + interaction.highlightedChatLocation = nil + } + + self.forEachItemNode { itemNode in + if let itemNode = itemNode as? ChatListItemNode { + itemNode.updateIsHighlighted(transition: transition) + } + } + } } diff --git a/TelegramUI/ChatListSearchContainerNode.swift b/TelegramUI/ChatListSearchContainerNode.swift index 823a1f570b..222b791081 100644 --- a/TelegramUI/ChatListSearchContainerNode.swift +++ b/TelegramUI/ChatListSearchContainerNode.swift @@ -372,6 +372,11 @@ final class ChatListSearchContainerNode: SearchDisplayControllerContentNode { private var stateValue = ChatListSearchContainerNodeState() private let statePromise: ValuePromise + private let _isSearching = ValuePromise(false, ignoreRepeated: true) + override var isSearching: Signal { + return self._isSearching.get() + } + init(account: Account, onlyWriteable: Bool, groupId: PeerGroupId?, openPeer: @escaping (Peer) -> Void, openRecentPeerOptions: @escaping (Peer) -> Void, openMessage: @escaping (Peer, MessageId) -> Void) { self.account = account self.openMessage = openMessage @@ -395,16 +400,16 @@ final class ChatListSearchContainerNode: SearchDisplayControllerContentNode { let presentationDataPromise = self.presentationDataPromise let foundItems = searchQuery.get() - |> mapToSignal { query -> Signal<[ChatListSearchEntry]?, NoError> in + 
|> mapToSignal { query -> Signal<([ChatListSearchEntry], Bool)?, NoError> in if let query = query, !query.isEmpty { let accountPeer = account.postbox.loadedPeerWithId(account.peerId) |> take(1) let foundLocalPeers = account.postbox.searchPeers(query: query.lowercased(), groupId: groupId) - let foundRemotePeers: Signal<([FoundPeer], [FoundPeer]), NoError> + let foundRemotePeers: Signal<([FoundPeer], [FoundPeer], Bool), NoError> if groupId == nil { - foundRemotePeers = .single(([], [])) |> then(searchPeers(account: account, query: query) - |> delay(0.2, queue: Queue.concurrentDefaultQueue())) + foundRemotePeers = (.single(([], [], true)) |> then(searchPeers(account: account, query: query) |> map { ($0.0, $0.1, false) } + |> delay(0.2, queue: Queue.concurrentDefaultQueue()))) } else { - foundRemotePeers = .single(([], [])) + foundRemotePeers = .single(([], [], false)) } let location: SearchMessagesLocation if let groupId = groupId { @@ -412,12 +417,14 @@ final class ChatListSearchContainerNode: SearchDisplayControllerContentNode { } else { location = .general } - let foundRemoteMessages: Signal<[Message], NoError> = .single([]) |> then(searchMessages(account: account, location: location, query: query) + let foundRemoteMessages: Signal<([Message], Bool), NoError> = .single(([], true)) |> then(searchMessages(account: account, location: location, query: query) + |> map { ($0, false) } |> delay(0.2, queue: Queue.concurrentDefaultQueue())) return combineLatest(accountPeer, foundLocalPeers, foundRemotePeers, foundRemoteMessages, presentationDataPromise.get()) - |> map { accountPeer, foundLocalPeers, foundRemotePeers, foundRemoteMessages, presentationData -> [ChatListSearchEntry]? in + |> map { accountPeer, foundLocalPeers, foundRemotePeers, foundRemoteMessages, presentationData -> ([ChatListSearchEntry], Bool)? 
in var entries: [ChatListSearchEntry] = [] + let isSearching = foundRemotePeers.2 || foundRemoteMessages.1 var index = 0 var existingPeerIds = Set() @@ -462,12 +469,12 @@ final class ChatListSearchContainerNode: SearchDisplayControllerContentNode { } index = 0 - for message in foundRemoteMessages { + for message in foundRemoteMessages.0 { entries.append(.message(message, presentationData)) index += 1 } - return entries + return (entries, isSearching) } } else { return .single(nil) @@ -553,12 +560,14 @@ final class ChatListSearchContainerNode: SearchDisplayControllerContentNode { })) self.searchDisposable.set((foundItems - |> deliverOnMainQueue).start(next: { [weak self] entries in + |> deliverOnMainQueue).start(next: { [weak self] entriesAndFlags in if let strongSelf = self { - let previousEntries = previousSearchItems.swap(entries) + strongSelf._isSearching.set(entriesAndFlags?.1 ?? false) + + let previousEntries = previousSearchItems.swap(entriesAndFlags?.0) let firstTime = previousEntries == nil - let transition = chatListSearchContainerPreparedTransition(from: previousEntries ?? [], to: entries ?? [], displayingResults: entries != nil, account: account, enableHeaders: true, onlyWriteable: onlyWriteable, interaction: interaction) + let transition = chatListSearchContainerPreparedTransition(from: previousEntries ?? [], to: entriesAndFlags?.0 ?? 
[], displayingResults: entriesAndFlags?.0 != nil, account: account, enableHeaders: true, onlyWriteable: onlyWriteable, interaction: interaction) strongSelf.enqueueTransition(transition, firstTime: firstTime) } })) diff --git a/TelegramUI/ChatMediaInputNode.swift b/TelegramUI/ChatMediaInputNode.swift index 50fc73333a..e3cc2e400f 100644 --- a/TelegramUI/ChatMediaInputNode.swift +++ b/TelegramUI/ChatMediaInputNode.swift @@ -369,7 +369,7 @@ final class ChatMediaInputNode: ChatInputNode { } case let .scroll(aroundIndex): var firstTime = true - return account.postbox.itemCollectionsView(orderedItemListCollectionIds: [Namespaces.OrderedItemList.CloudSavedStickers, Namespaces.OrderedItemList.CloudRecentStickers], namespaces: [Namespaces.ItemCollection.CloudStickerPacks], aroundIndex: aroundIndex, count: 200) + return account.postbox.itemCollectionsView(orderedItemListCollectionIds: [Namespaces.OrderedItemList.CloudSavedStickers, Namespaces.OrderedItemList.CloudRecentStickers], namespaces: [Namespaces.ItemCollection.CloudStickerPacks], aroundIndex: aroundIndex, count: 300) |> map { view -> (ItemCollectionsView, StickerPacksCollectionUpdate) in let update: StickerPacksCollectionUpdate if firstTime { @@ -382,7 +382,7 @@ final class ChatMediaInputNode: ChatInputNode { } case let .navigate(index, collectionId): var firstTime = true - return account.postbox.itemCollectionsView(orderedItemListCollectionIds: [Namespaces.OrderedItemList.CloudSavedStickers, Namespaces.OrderedItemList.CloudRecentStickers], namespaces: [Namespaces.ItemCollection.CloudStickerPacks], aroundIndex: index, count: 200) + return account.postbox.itemCollectionsView(orderedItemListCollectionIds: [Namespaces.OrderedItemList.CloudSavedStickers, Namespaces.OrderedItemList.CloudRecentStickers], namespaces: [Namespaces.ItemCollection.CloudStickerPacks], aroundIndex: index, count: 300) |> map { view -> (ItemCollectionsView, StickerPacksCollectionUpdate) in let update: StickerPacksCollectionUpdate if firstTime { diff 
--git a/TelegramUI/ChatMessageActionSheetController.swift b/TelegramUI/ChatMessageActionSheetController.swift index 7ccccd6afb..83a44d0070 100644 --- a/TelegramUI/ChatMessageActionSheetController.swift +++ b/TelegramUI/ChatMessageActionSheetController.swift @@ -16,7 +16,7 @@ final class ChatMessageActionSheetController: ViewController { self.actions = actions self.dismissed = dismissed - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) } required init(coder aDecoder: NSCoder) { diff --git a/TelegramUI/ChatMessageActionSheetControllerNode.swift b/TelegramUI/ChatMessageActionSheetControllerNode.swift index 6eee3c37c4..85dd4ea36d 100644 --- a/TelegramUI/ChatMessageActionSheetControllerNode.swift +++ b/TelegramUI/ChatMessageActionSheetControllerNode.swift @@ -63,6 +63,8 @@ private final class MessageActionButtonNode: HighlightableButtonNode { final class ChatMessageActionSheetControllerNode: ViewControllerTracingNode { private let theme: PresentationTheme + private let sideDimNode: ASDisplayNode + private let sideInputDimNode: ASDisplayNode private let inputDimNode: ASDisplayNode private let itemsShadowNode: ASImageNode private let itemsContainerNode: ASDisplayNode @@ -79,6 +81,12 @@ final class ChatMessageActionSheetControllerNode: ViewControllerTracingNode { self.actions = actions self.dismissed = dismissed + self.sideDimNode = ASDisplayNode() + self.sideDimNode.backgroundColor = UIColor(white: 0.0, alpha: 0.5) + + self.sideInputDimNode = ASDisplayNode() + self.sideInputDimNode.backgroundColor = UIColor(white: 0.0, alpha: 0.5) + self.inputDimNode = ASDisplayNode() self.inputDimNode.backgroundColor = UIColor(white: 0.0, alpha: 0.5) @@ -101,6 +109,8 @@ final class ChatMessageActionSheetControllerNode: ViewControllerTracingNode { super.init() + self.addSubnode(self.sideDimNode) + self.addSubnode(self.sideInputDimNode) self.addSubnode(self.inputDimNode) self.addSubnode(self.itemsShadowNode) self.addSubnode(self.itemsContainerNode) @@ 
-116,19 +126,27 @@ final class ChatMessageActionSheetControllerNode: ViewControllerTracingNode { override func didLoad() { super.didLoad() + self.sideDimNode.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.dimTap(_:)))) + self.sideInputDimNode.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.dimTap(_:)))) self.inputDimNode.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.dimTap(_:)))) } func animateIn(transition: ContainedViewLayoutTransition) { self.inputDimNode.alpha = 0.0 + self.sideInputDimNode.alpha = 0.0 + self.sideDimNode.alpha = 0.0 transition.updateAlpha(node: self.inputDimNode, alpha: 1.0) - transition.animatePositionAdditive(node: self.itemsContainerNode, offset: self.bounds.size.height) - transition.animatePositionAdditive(node: self.itemsShadowNode, offset: self.bounds.size.height) + transition.updateAlpha(node: self.sideInputDimNode, alpha: 1.0) + transition.updateAlpha(node: self.sideDimNode, alpha: 1.0) + transition.animatePositionAdditive(node: self.itemsContainerNode, offset: CGPoint(x: 0.0, y: self.bounds.size.height)) + transition.animatePositionAdditive(node: self.itemsShadowNode, offset: CGPoint(x: 0.0, y: self.bounds.size.height)) self.feedback.impact() } func animateOut(transition: ContainedViewLayoutTransition, completion: @escaping () -> Void) { + transition.updateAlpha(node: self.sideInputDimNode, alpha: 0.0) + transition.updateAlpha(node: self.sideDimNode, alpha: 0.0) transition.updateAlpha(node: self.inputDimNode, alpha: 0.0) let position = CGPoint(x: self.itemsContainerNode.position.x, y: self.bounds.size.height + self.itemsContainerNode.bounds.height) transition.updatePosition(node: self.itemsContainerNode, position: position, completion: { _ in @@ -137,24 +155,33 @@ final class ChatMessageActionSheetControllerNode: ViewControllerTracingNode { transition.updatePosition(node: self.itemsShadowNode, position: position) } - func 
updateLayout(layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) -> CGFloat { + func updateLayout(layout: ContainerViewLayout, horizontalOrigin: CGFloat, transition: ContainedViewLayoutTransition) -> CGFloat { self.validLayout = layout var height: CGFloat = max(14.0, layout.intrinsicInsets.bottom) + var horizontalOffset: CGFloat = horizontalOrigin + if !horizontalOffset.isZero { + horizontalOffset += UIScreenPixel // temporary fix for master-detail separator dimming + } + let inputHeight = layout.inputHeight ?? 0.0 - transition.updateFrame(node: self.inputDimNode, frame: CGRect(origin: CGPoint(x: 0.0, y: layout.size.height - inputHeight), size: CGSize(width: layout.size.width, height: inputHeight))) + transition.updateFrame(node: self.sideDimNode, frame: CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: CGSize(width: max(0.0, horizontalOffset), height: max(0.0, layout.size.height - inputHeight)))) + transition.updateFrame(node: self.sideInputDimNode, frame: CGRect(origin: CGPoint(x: 0.0, y: layout.size.height - inputHeight), size: CGSize(width: max(0.0, horizontalOrigin), height: max(0.0, inputHeight)))) + transition.updateFrame(node: self.inputDimNode, frame: CGRect(origin: CGPoint(x: horizontalOrigin, y: layout.size.height - inputHeight), size: CGSize(width: layout.size.width, height: inputHeight))) height += layout.safeInsets.bottom + let containerWidth = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: 7.0 * 2.0) + var itemsHeight: CGFloat = 0.0 for actionNode in self.actionNodes { - actionNode.frame = CGRect(origin: CGPoint(x: 0.0, y: itemsHeight), size: CGSize(width: layout.size.width - 14.0 * 2.0, height: 57.0)) + actionNode.frame = CGRect(origin: CGPoint(x: 0.0, y: itemsHeight), size: CGSize(width: containerWidth, height: 57.0)) actionNode.layout() itemsHeight += actionNode.bounds.height } - let containerFrame = CGRect(origin: CGPoint(x: 14.0, y: layout.size.height - height - itemsHeight), size: CGSize(width: 
layout.size.width - 14.0 * 2.0, height: itemsHeight)) + let containerFrame = CGRect(origin: CGPoint(x: horizontalOrigin + floor((layout.size.width - containerWidth) / 2.0), y: layout.size.height - height - itemsHeight), size: CGSize(width: containerWidth, height: itemsHeight)) transition.updateFrame(node: self.itemsContainerNode, frame: containerFrame) transition.updateFrame(node: self.itemsShadowNode, frame: containerFrame.insetBy(dx: -shadowInset, dy: -shadowInset)) diff --git a/TelegramUI/ChatMessageBubbleContentNode.swift b/TelegramUI/ChatMessageBubbleContentNode.swift index 8b7268c18d..bb848fee47 100644 --- a/TelegramUI/ChatMessageBubbleContentNode.swift +++ b/TelegramUI/ChatMessageBubbleContentNode.swift @@ -5,7 +5,7 @@ import Postbox import TelegramCore enum ChatMessageBubbleContentBackgroundHiding { - case none + case never case emptyWallpaper case always } diff --git a/TelegramUI/ChatMessageBubbleItemNode.swift b/TelegramUI/ChatMessageBubbleItemNode.swift index c3068942c3..65675a37d3 100644 --- a/TelegramUI/ChatMessageBubbleItemNode.swift +++ b/TelegramUI/ChatMessageBubbleItemNode.swift @@ -9,7 +9,7 @@ private func contentNodeMessagesAndClassesForItem(_ item: ChatMessageItem) -> [( var skipText = false var addFinalText = false - for message in item.content { + outer: for message in item.content { inner: for media in message.media { if let _ = media as? TelegramMediaImage { result.append((message, ChatMessageMediaBubbleContentNode.self)) @@ -38,7 +38,9 @@ private func contentNodeMessagesAndClassesForItem(_ item: ChatMessageItem) -> [( } else if let _ = media as? TelegramMediaContact { result.append((message, ChatMessageContactBubbleContentNode.self)) } else if let _ = media as? 
TelegramMediaExpiredContent { + result.removeAll() result.append((message, ChatMessageActionBubbleContentNode.self)) + return result } } @@ -372,6 +374,13 @@ class ChatMessageBubbleItemNode: ChatMessageItemView { break loop } } + } else { + loop: for media in item.message.media { + if media is TelegramMediaAction { + needShareButton = false + break loop + } + } } } @@ -438,7 +447,7 @@ class ChatMessageBubbleItemNode: ChatMessageItemView { let topNodeMergeStatus: ChatMessageBubbleMergeStatus = mergedTop.merged ? (incoming ? .Left : .Right) : .None(incoming ? .Incoming : .Outgoing) let bottomNodeMergeStatus: ChatMessageBubbleMergeStatus = mergedBottom.merged ? (incoming ? .Left : .Right) : .None(incoming ? .Incoming : .Outgoing) - var backgroundHiding: ChatMessageBubbleContentBackgroundHiding = .none + var backgroundHiding: ChatMessageBubbleContentBackgroundHiding? var hasSolidWallpaper = false if case .color = item.presentationData.wallpaper { hasSolidWallpaper = true @@ -526,14 +535,11 @@ class ChatMessageBubbleItemNode: ChatMessageItemView { contentPropertiesAndLayouts.append((unboundSize, properties, prepareContentPosition, nodeLayout)) switch properties.hidesBackground { - case .none: - break + case .never: + backgroundHiding = .never case .emptyWallpaper: - switch backgroundHiding { - case .none: - backgroundHiding = properties.hidesBackground - default: - break + if backgroundHiding == nil { + backgroundHiding = properties.hidesBackground } case .always: backgroundHiding = .always @@ -550,7 +556,7 @@ class ChatMessageBubbleItemNode: ChatMessageItemView { } var initialDisplayHeader = true - if case .always = backgroundHiding { + if let backgroundHiding = backgroundHiding, case .always = backgroundHiding { initialDisplayHeader = false } else { if inlineBotNameString == nil && (ignoreForward || firstMessage.forwardInfo == nil) && replyMessage == nil { @@ -758,13 +764,17 @@ class ChatMessageBubbleItemNode: ChatMessageItemView { } let hideBackground: Bool - switch 
backgroundHiding { - case .none: - hideBackground = false - case .emptyWallpaper: - hideBackground = hasSolidWallpaper && !displayHeader - case .always: - hideBackground = true + if let backgroundHiding = backgroundHiding { + switch backgroundHiding { + case .never: + hideBackground = false + case .emptyWallpaper: + hideBackground = hasSolidWallpaper && !displayHeader + case .always: + hideBackground = true + } + } else { + hideBackground = false } var removedContentNodeIndices: [Int]? @@ -923,7 +933,7 @@ class ChatMessageBubbleItemNode: ChatMessageItemView { let (contentNodeWidth, contentNodeFinalize) = contentNodeLayout(CGSize(width: maximumNodeWidth, height: CGFloat.greatestFiniteMagnitude), contentPosition) #if DEBUG if contentNodeWidth > maximumNodeWidth { - print("\(contentNodeWidth) > \(maximumNodeWidth)") + print("contentNodeWidth \(contentNodeWidth) > \(maximumNodeWidth)") } #endif maxContentWidth = max(maxContentWidth, contentNodeWidth) diff --git a/TelegramUI/ChatMessageCallBubbleContentNode.swift b/TelegramUI/ChatMessageCallBubbleContentNode.swift index ed7ff494ea..40b54ad9ef 100644 --- a/TelegramUI/ChatMessageCallBubbleContentNode.swift +++ b/TelegramUI/ChatMessageCallBubbleContentNode.swift @@ -59,7 +59,7 @@ class ChatMessageCallBubbleContentNode: ChatMessageBubbleContentNode { let makeLabelLayout = TextNode.asyncLayout(self.labelNode) return { item, layoutConstants, _, _, _ in - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, CGFloat.greatestFiniteMagnitude, { constrainedSize, position in let message = item.message @@ -70,13 +70,7 @@ class ChatMessageCallBubbleContentNode: ChatMessageBubbleContentNode { 
let bubbleTheme = item.presentationData.theme.chat.bubble - var titleString: String - if message.flags.contains(.Incoming) { - titleString = item.presentationData.strings.Notification_CallIncoming - } else { - titleString = item.presentationData.strings.Notification_CallOutgoing - } - + var titleString: String? var callDuration: Int32? var callSuccessful = true for media in item.message.media { @@ -98,11 +92,22 @@ class ChatMessageCallBubbleContentNode: ChatMessageBubbleContentNode { } } + if titleString == nil { + let baseString: String + if message.flags.contains(.Incoming) { + baseString = item.presentationData.strings.Notification_CallIncoming + } else { + baseString = item.presentationData.strings.Notification_CallOutgoing + } + + titleString = baseString + } + var attributedTitle: NSAttributedString? if message.flags.contains(.Incoming) { - attributedTitle = NSAttributedString(string: titleString, font: titleFont, textColor: bubbleTheme.incomingPrimaryTextColor) + attributedTitle = NSAttributedString(string: titleString ?? "", font: titleFont, textColor: bubbleTheme.incomingPrimaryTextColor) } else { - attributedTitle = NSAttributedString(string: titleString, font: titleFont, textColor: bubbleTheme.outgoingPrimaryTextColor) + attributedTitle = NSAttributedString(string: titleString ?? "", font: titleFont, textColor: bubbleTheme.outgoingPrimaryTextColor) } var callIcon: UIImage? @@ -129,8 +134,15 @@ class ChatMessageCallBubbleContentNode: ChatMessageBubbleContentNode { let dateText = stringForMessageTimestampStatus(message: item.message, timeFormat: item.presentationData.timeFormat, strings: item.presentationData.strings) + let statusText: String + if let callDuration = callDuration, callDuration > 1 { + statusText = item.presentationData.strings.Notification_CallFormat(dateText, callDurationString(strings: item.presentationData.strings, value: callDuration)).0 + } else { + statusText = dateText + } + var attributedLabel: NSAttributedString? 
- attributedLabel = NSAttributedString(string: dateText, font: labelFont, textColor: message.effectivelyIncoming(item.account.peerId) ? bubbleTheme.incomingFileDurationColor : bubbleTheme.outgoingFileDurationColor) + attributedLabel = NSAttributedString(string: statusText, font: labelFont, textColor: message.effectivelyIncoming(item.account.peerId) ? bubbleTheme.incomingFileDurationColor : bubbleTheme.outgoingFileDurationColor) let (titleLayout, titleApply) = makeTitleLayout(TextNodeLayoutArguments(attributedString: attributedTitle, backgroundColor: nil, maximumNumberOfLines: 0, truncationType: .end, constrainedSize: textConstrainedSize, alignment: .natural, cutout: nil, insets: UIEdgeInsets())) let (labelLayout, labelApply) = makeLabelLayout(TextNodeLayoutArguments(attributedString: attributedLabel, backgroundColor: nil, maximumNumberOfLines: 0, truncationType: .end, constrainedSize: textConstrainedSize, alignment: .natural, cutout: nil, insets: UIEdgeInsets())) diff --git a/TelegramUI/ChatMessageContactBubbleContentNode.swift b/TelegramUI/ChatMessageContactBubbleContentNode.swift index d944d77b3b..41c9ea7cec 100644 --- a/TelegramUI/ChatMessageContactBubbleContentNode.swift +++ b/TelegramUI/ChatMessageContactBubbleContentNode.swift @@ -86,7 +86,7 @@ class ChatMessageContactBubbleContentNode: ChatMessageBubbleContentNode { updatedPhone = nil } - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, CGFloat.greatestFiniteMagnitude, { constrainedSize, position in let avatarSize = CGSize(width: 40.0, height: 40.0) diff --git a/TelegramUI/ChatMessageEventLogPreviousDescriptionContentNode.swift 
b/TelegramUI/ChatMessageEventLogPreviousDescriptionContentNode.swift index 7f15e074b2..e52a2c1afc 100644 --- a/TelegramUI/ChatMessageEventLogPreviousDescriptionContentNode.swift +++ b/TelegramUI/ChatMessageEventLogPreviousDescriptionContentNode.swift @@ -45,7 +45,7 @@ final class ChatMessageEventLogPreviousDescriptionContentNode: ChatMessageBubble let (initialWidth, continueLayout) = contentNodeLayout(item.presentationData, item.controllerInteraction.automaticMediaDownloadSettings, item.account, item.message, true, title, subtitle, text, messageEntities, mediaAndFlags, nil, nil, true, layoutConstants, constrainedSize) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth, { constrainedSize, position in let (refinedWidth, finalizeLayout) = continueLayout(constrainedSize, position) diff --git a/TelegramUI/ChatMessageEventLogPreviousLinkContentNode.swift b/TelegramUI/ChatMessageEventLogPreviousLinkContentNode.swift index 5ce3131dfd..50a654bd80 100644 --- a/TelegramUI/ChatMessageEventLogPreviousLinkContentNode.swift +++ b/TelegramUI/ChatMessageEventLogPreviousLinkContentNode.swift @@ -40,7 +40,7 @@ final class ChatMessageEventLogPreviousLinkContentNode: ChatMessageBubbleContent let (initialWidth, continueLayout) = contentNodeLayout(item.presentationData, item.controllerInteraction.automaticMediaDownloadSettings, item.account, item.message, true, title, subtitle, text, messageEntities, mediaAndFlags, nil, nil, true, layoutConstants, constrainedSize) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .none, forceFullCorners: false, 
forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth, { constrainedSize, position in let (refinedWidth, finalizeLayout) = continueLayout(constrainedSize, position) diff --git a/TelegramUI/ChatMessageEventLogPreviousMessageContentNode.swift b/TelegramUI/ChatMessageEventLogPreviousMessageContentNode.swift index 62db4ffe20..982bf63a84 100644 --- a/TelegramUI/ChatMessageEventLogPreviousMessageContentNode.swift +++ b/TelegramUI/ChatMessageEventLogPreviousMessageContentNode.swift @@ -45,7 +45,7 @@ final class ChatMessageEventLogPreviousMessageContentNode: ChatMessageBubbleCont let (initialWidth, continueLayout) = contentNodeLayout(item.presentationData, item.controllerInteraction.automaticMediaDownloadSettings, item.account, item.message, true, title, subtitle, text, messageEntities, mediaAndFlags, nil, nil, true, layoutConstants, constrainedSize) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth, { constrainedSize, position in let (refinedWidth, finalizeLayout) = continueLayout(constrainedSize, position) diff --git a/TelegramUI/ChatMessageFileBubbleContentNode.swift b/TelegramUI/ChatMessageFileBubbleContentNode.swift index a4aa49f5c4..e92b18c337 100644 --- a/TelegramUI/ChatMessageFileBubbleContentNode.swift +++ b/TelegramUI/ChatMessageFileBubbleContentNode.swift @@ -65,7 +65,7 @@ class ChatMessageFileBubbleContentNode: ChatMessageBubbleContentNode { let (initialWidth, refineLayout) = 
interactiveFileLayout(item.account, item.presentationData, item.message, selectedFile!, automaticDownload, item.message.effectivelyIncoming(item.account.peerId), statusType, CGSize(width: constrainedSize.width - layoutConstants.file.bubbleInsets.left - layoutConstants.file.bubbleInsets.right, height: constrainedSize.height)) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth + layoutConstants.file.bubbleInsets.left + layoutConstants.file.bubbleInsets.right, { constrainedSize, position in let (refinedWidth, finishLayout) = refineLayout(CGSize(width: constrainedSize.width - layoutConstants.file.bubbleInsets.left - layoutConstants.file.bubbleInsets.right, height: constrainedSize.height)) diff --git a/TelegramUI/ChatMessageForwardInfoNode.swift b/TelegramUI/ChatMessageForwardInfoNode.swift index 10e806489a..4172a7f8f7 100644 --- a/TelegramUI/ChatMessageForwardInfoNode.swift +++ b/TelegramUI/ChatMessageForwardInfoNode.swift @@ -24,9 +24,9 @@ class ChatMessageForwardInfoNode: ASDisplayNode { return { theme, strings, type, peer, authorName, constrainedSize in let peerString: String if let authorName = authorName { - peerString = "\(peer.displayTitle) (\(authorName))" + peerString = "\(peer.displayTitle(or: strings.Peer_DeletedUser)) (\(authorName))" } else { - peerString = peer.displayTitle + peerString = peer.displayTitle(or: strings.Peer_DeletedUser) } let titleColor: UIColor diff --git a/TelegramUI/ChatMessageGameBubbleContentNode.swift b/TelegramUI/ChatMessageGameBubbleContentNode.swift index 124a23f145..f191fc20ca 100644 --- a/TelegramUI/ChatMessageGameBubbleContentNode.swift +++ 
b/TelegramUI/ChatMessageGameBubbleContentNode.swift @@ -67,7 +67,7 @@ final class ChatMessageGameBubbleContentNode: ChatMessageBubbleContentNode { let (initialWidth, continueLayout) = contentNodeLayout(item.presentationData, item.controllerInteraction.automaticMediaDownloadSettings, item.account, item.message, item.read, title, subtitle, item.message.text.isEmpty ? text : item.message.text, item.message.text.isEmpty ? nil : messageEntities, mediaAndFlags, nil, nil, true, layoutConstants, constrainedSize) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth, { constrainedSize, position in let (refinedWidth, finalizeLayout) = continueLayout(constrainedSize, position) diff --git a/TelegramUI/ChatMessageInstantVideoItemNode.swift b/TelegramUI/ChatMessageInstantVideoItemNode.swift index 32059ead07..3fd97e1f7b 100644 --- a/TelegramUI/ChatMessageInstantVideoItemNode.swift +++ b/TelegramUI/ChatMessageInstantVideoItemNode.swift @@ -466,7 +466,8 @@ class ChatMessageInstantVideoItemNode: ChatMessageItemView { durationNode.defaultDuration = telegramFile.duration.flatMap(Double.init) if let videoNode = strongSelf.videoNode { - videoNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.2, removeOnCompletion: false, completion: { [weak videoNode] _ in + videoNode.layer.allowsGroupOpacity = true + videoNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.5, delay: 0.2, removeOnCompletion: false, completion: { [weak videoNode] _ in videoNode?.removeFromSupernode() }) } @@ -480,9 +481,10 @@ class ChatMessageInstantVideoItemNode: ChatMessageItemView { } } } - }), content: NativeVideoContent(id: .message(item.message.id, 
telegramFile.fileId), file: telegramFile, streamVideo: false, enableSound: false), priority: .embedded, autoplay: true) + }), content: NativeVideoContent(id: .message(item.message.id, item.message.stableId, telegramFile.fileId), file: telegramFile, streamVideo: false, enableSound: false), priority: .embedded, autoplay: true) + let previousVideoNode = strongSelf.videoNode strongSelf.videoNode = videoNode - strongSelf.insertSubnode(videoNode, belowSubnode: strongSelf.dateAndStatusNode) + strongSelf.insertSubnode(videoNode, belowSubnode: previousVideoNode ?? strongSelf.dateAndStatusNode) videoNode.canAttachContent = strongSelf.shouldAcquireVideoContext } diff --git a/TelegramUI/ChatMessageInteractiveMediaNode.swift b/TelegramUI/ChatMessageInteractiveMediaNode.swift index c916453781..1839e8a0b2 100644 --- a/TelegramUI/ChatMessageInteractiveMediaNode.swift +++ b/TelegramUI/ChatMessageInteractiveMediaNode.swift @@ -372,7 +372,7 @@ final class ChatMessageInteractiveMediaNode: ASTransformNode { } if replaceVideoNode, let updatedVideoFile = updateVideoFile { - let videoNode = UniversalVideoNode(postbox: account.postbox, audioSession: account.telegramApplicationContext.mediaManager.audioSession, manager: account.telegramApplicationContext.mediaManager.universalVideoManager, decoration: ChatBubbleVideoDecoration(cornerRadius: 17.0, nativeSize: nativeSize), content: NativeVideoContent(id: .message(message.id, updatedVideoFile.fileId), file: updatedVideoFile, enableSound: false), priority: .embedded) + let videoNode = UniversalVideoNode(postbox: account.postbox, audioSession: account.telegramApplicationContext.mediaManager.audioSession, manager: account.telegramApplicationContext.mediaManager.universalVideoManager, decoration: ChatBubbleVideoDecoration(cornerRadius: 17.0, nativeSize: nativeSize), content: NativeVideoContent(id: .message(message.id, message.stableId, updatedVideoFile.fileId), file: updatedVideoFile, enableSound: false, fetchAutomatically: false), priority: 
.embedded) videoNode.isUserInteractionEnabled = false strongSelf.videoNode = videoNode @@ -534,6 +534,8 @@ final class ChatMessageInteractiveMediaNode: ASTransformNode { strongSelf.fetchDisposable.set(chatMessagePhotoInteractiveFetched(account: account, photo: image).start()) } else if let image = media as? TelegramMediaWebFile { strongSelf.fetchDisposable.set(chatMessageWebFileInteractiveFetched(account: account, image: image).start()) + } else if let file = media as? TelegramMediaFile { + strongSelf.fetchDisposable.set(messageMediaFileInteractiveFetched(account: account, messageId: message.id, file: file).start()) } } } diff --git a/TelegramUI/ChatMessageInvoiceBubbleContentNode.swift b/TelegramUI/ChatMessageInvoiceBubbleContentNode.swift index 8e3e935b47..cc1cfdaeba 100644 --- a/TelegramUI/ChatMessageInvoiceBubbleContentNode.swift +++ b/TelegramUI/ChatMessageInvoiceBubbleContentNode.swift @@ -56,7 +56,7 @@ final class ChatMessageInvoiceBubbleContentNode: ChatMessageBubbleContentNode { let (initialWidth, continueLayout) = contentNodeLayout(item.presentationData, item.controllerInteraction.automaticMediaDownloadSettings, item.account, item.message, item.read, title, subtitle, text, nil, mediaAndFlags, nil, nil, false, layoutConstants, constrainedSize) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth, { constrainedSize, position in let (refinedWidth, finalizeLayout) = continueLayout(constrainedSize, position) diff --git a/TelegramUI/ChatMessageItemView.swift b/TelegramUI/ChatMessageItemView.swift index 5a423b5667..1e1b03300b 100644 --- a/TelegramUI/ChatMessageItemView.swift +++ 
b/TelegramUI/ChatMessageItemView.swift @@ -161,13 +161,23 @@ public class ChatMessageItemView: ListViewItemNode { } func updateHighlightedState(animated: Bool) { - if let item = self.item { - if item.content.firstMessage.stableId == item.controllerInteraction.contextHighlightedState?.messageStableId { - self.isHighligtedInOverlay = true - } else { - self.isHighligtedInOverlay = false + var isHighlightedInOverlay = false + if let item = self.item, let contextHighlightedState = item.controllerInteraction.contextHighlightedState { + switch item.content { + case let .message(message, _, _): + if contextHighlightedState.messageStableId == message.stableId { + isHighlightedInOverlay = true + } + case let .group(messages): + for (message, _, _) in messages { + if contextHighlightedState.messageStableId == message.stableId { + isHighlightedInOverlay = true + break + } + } } } + self.isHighlightedInOverlay = isHighlightedInOverlay } func updateAutomaticMediaDownloadSettings() { diff --git a/TelegramUI/ChatMessageMapBubbleContentNode.swift b/TelegramUI/ChatMessageMapBubbleContentNode.swift index e5095e0606..1f7d4969a4 100644 --- a/TelegramUI/ChatMessageMapBubbleContentNode.swift +++ b/TelegramUI/ChatMessageMapBubbleContentNode.swift @@ -134,7 +134,7 @@ class ChatMessageMapBubbleContentNode: ChatMessageBubbleContentNode { maximumWidth = imageSize.width + bubbleInsets.left + bubbleInsets.right } - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: true, headerSpacing: 5.0, hidesBackground: (activeLiveBroadcastingTimeout == nil && selectedMedia?.venue == nil) ? .emptyWallpaper : .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: true, headerSpacing: 5.0, hidesBackground: (activeLiveBroadcastingTimeout == nil && selectedMedia?.venue == nil) ? .emptyWallpaper : .never, forceFullCorners: false, forceAlignment: .none) var pinPeer: Peer? 
var pinLiveLocationActive: Bool? diff --git a/TelegramUI/ChatMessageNotificationItem.swift b/TelegramUI/ChatMessageNotificationItem.swift index 5df9d26ba5..659829f463 100644 --- a/TelegramUI/ChatMessageNotificationItem.swift +++ b/TelegramUI/ChatMessageNotificationItem.swift @@ -111,6 +111,7 @@ final class ChatMessageNotificationItemNode: NotificationItemNode { var updatedMedia: Media? var imageDimensions: CGSize? + var isRound = false if item.message.id.peerId.namespace != Namespaces.Peer.SecretChat { for media in item.message.media { if let image = media as? TelegramMediaImage { @@ -124,6 +125,7 @@ final class ChatMessageNotificationItemNode: NotificationItemNode { if let representation = largestImageRepresentation(file.previewRepresentations) { imageDimensions = representation.dimensions } + isRound = file.isInstantVideo break } } @@ -133,7 +135,11 @@ final class ChatMessageNotificationItemNode: NotificationItemNode { var applyImage: (() -> Void)? if let imageDimensions = imageDimensions { let boundingSize = CGSize(width: 55.0, height: 55.0) - applyImage = imageNodeLayout(TransformImageArguments(corners: ImageCorners(radius: 6.0), imageSize: imageDimensions.aspectFilled(boundingSize), boundingSize: boundingSize, intrinsicInsets: UIEdgeInsets())) + var radius: CGFloat = 6.0 + if isRound { + radius = floor(boundingSize.width / 2.0) + } + applyImage = imageNodeLayout(TransformImageArguments(corners: ImageCorners(radius: radius), imageSize: imageDimensions.aspectFilled(boundingSize), boundingSize: boundingSize, intrinsicInsets: UIEdgeInsets())) } var updateImageSignal: Signal<(TransformImageArguments) -> DrawingContext?, NoError>? 
@@ -153,7 +159,7 @@ final class ChatMessageNotificationItemNode: NotificationItemNode { if item.message.id.peerId.namespace == Namespaces.Peer.SecretChat { messageText = item.strings.ENCRYPTED_MESSAGE("").0 } else { - messageText = descriptionStringForMessage(item.message, strings: item.strings, accountPeerId: item.account.peerId) + messageText = descriptionStringForMessage(item.message, strings: item.strings, accountPeerId: item.account.peerId).0 } if let applyImage = applyImage { diff --git a/TelegramUI/ChatMessageReplyInfoNode.swift b/TelegramUI/ChatMessageReplyInfoNode.swift index a98c9002c9..2eb03d8a12 100644 --- a/TelegramUI/ChatMessageReplyInfoNode.swift +++ b/TelegramUI/ChatMessageReplyInfoNode.swift @@ -55,7 +55,7 @@ class ChatMessageReplyInfoNode: ASDisplayNode { return { theme, strings, account, type, message, constrainedSize in let titleString = message.author?.displayTitle ?? "" - let textString = descriptionStringForMessage(message, strings: strings, accountPeerId: account.peerId) + let (textString, isMedia) = descriptionStringForMessage(message, strings: strings, accountPeerId: account.peerId) let titleColor: UIColor let lineImage: UIImage? @@ -65,7 +65,11 @@ class ChatMessageReplyInfoNode: ASDisplayNode { case let .bubble(incoming): titleColor = incoming ? theme.chat.bubble.incomingAccentTextColor : theme.chat.bubble.outgoingAccentTextColor lineImage = incoming ? PresentationResourcesChat.chatBubbleVerticalLineIncomingImage(theme) : PresentationResourcesChat.chatBubbleVerticalLineOutgoingImage(theme) - textColor = incoming ? theme.chat.bubble.incomingPrimaryTextColor : theme.chat.bubble.outgoingPrimaryTextColor + if isMedia { + textColor = incoming ? theme.chat.bubble.incomingSecondaryTextColor : theme.chat.bubble.outgoingSecondaryTextColor + } else { + textColor = incoming ? 
theme.chat.bubble.incomingPrimaryTextColor : theme.chat.bubble.outgoingPrimaryTextColor + } case .standalone: titleColor = theme.chat.serviceMessage.serviceMessagePrimaryTextColor lineImage = PresentationResourcesChat.chatServiceVerticalLineImage(theme) diff --git a/TelegramUI/ChatMessageSelectionInputPanelNode.swift b/TelegramUI/ChatMessageSelectionInputPanelNode.swift index 808d49d018..01fef3ac50 100644 --- a/TelegramUI/ChatMessageSelectionInputPanelNode.swift +++ b/TelegramUI/ChatMessageSelectionInputPanelNode.swift @@ -95,7 +95,7 @@ final class ChatMessageSelectionInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.shareSelectedMessages() } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { self.presentationInterfaceState = interfaceState } @@ -107,7 +107,7 @@ final class ChatMessageSelectionInputPanelNode: ChatInputPanelNode { return 47.0 } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/ChatMessageTextBubbleContentNode.swift b/TelegramUI/ChatMessageTextBubbleContentNode.swift index dbd355d1f2..d7820eec03 100644 --- a/TelegramUI/ChatMessageTextBubbleContentNode.swift +++ b/TelegramUI/ChatMessageTextBubbleContentNode.swift @@ -61,7 +61,7 @@ class ChatMessageTextBubbleContentNode: ChatMessageBubbleContentNode { let currentCachedChatMessageText = self.cachedChatMessageText return { item, layoutConstants, _, _, _ in - 
let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, CGFloat.greatestFiniteMagnitude, { constrainedSize, position in let message = item.message diff --git a/TelegramUI/ChatMessageWebpageBubbleContentNode.swift b/TelegramUI/ChatMessageWebpageBubbleContentNode.swift index 15e624a1d2..eae9cbf29a 100644 --- a/TelegramUI/ChatMessageWebpageBubbleContentNode.swift +++ b/TelegramUI/ChatMessageWebpageBubbleContentNode.swift @@ -229,7 +229,7 @@ final class ChatMessageWebpageBubbleContentNode: ChatMessageBubbleContentNode { let (initialWidth, continueLayout) = contentNodeLayout(item.presentationData, item.controllerInteraction.automaticMediaDownloadSettings, item.account, item.message, item.read, title, subtitle, text, entities, mediaAndFlags, actionIcon, actionTitle, true, layoutConstants, constrainedSize) - let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .none, forceFullCorners: false, forceAlignment: .none) + let contentProperties = ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 8.0, hidesBackground: .never, forceFullCorners: false, forceAlignment: .none) return (contentProperties, nil, initialWidth, { constrainedSize, position in let (refinedWidth, finalizeLayout) = continueLayout(constrainedSize, position) diff --git a/TelegramUI/ChatPinnedMessageTitlePanelNode.swift b/TelegramUI/ChatPinnedMessageTitlePanelNode.swift index 9450008878..08d3bf819a 100644 --- a/TelegramUI/ChatPinnedMessageTitlePanelNode.swift +++ b/TelegramUI/ChatPinnedMessageTitlePanelNode.swift @@ -213,7 +213,7 @@ final class 
ChatPinnedMessageTitlePanelNode: ChatTitleAccessoryPanelNode { let (titleLayout, titleApply) = makeTitleLayout(TextNodeLayoutArguments(attributedString: NSAttributedString(string: strings.Conversation_PinnedMessage, font: Font.medium(15.0), textColor: theme.chat.inputPanel.panelControlAccentColor), backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: width - textLineInset - leftInset - rightInset - textRightInset, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets(top: 2.0, left: 0.0, bottom: 2.0, right: 0.0))) - let (textLayout, textApply) = makeTextLayout(TextNodeLayoutArguments(attributedString: NSAttributedString(string: descriptionStringForMessage(message, strings: strings, accountPeerId: accountPeerId), font: Font.regular(15.0), textColor: theme.chat.inputPanel.primaryTextColor), backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: width - textLineInset - leftInset - rightInset - textRightInset, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets(top: 2.0, left: 0.0, bottom: 2.0, right: 0.0))) + let (textLayout, textApply) = makeTextLayout(TextNodeLayoutArguments(attributedString: NSAttributedString(string: descriptionStringForMessage(message, strings: strings, accountPeerId: accountPeerId).0, font: Font.regular(15.0), textColor: theme.chat.inputPanel.primaryTextColor), backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: width - textLineInset - leftInset - rightInset - textRightInset, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets(top: 2.0, left: 0.0, bottom: 2.0, right: 0.0))) Queue.mainQueue().async { if let strongSelf = self { diff --git a/TelegramUI/ChatRecentActionsController.swift b/TelegramUI/ChatRecentActionsController.swift index 56ee6db293..6d8a29a70b 100644 --- 
a/TelegramUI/ChatRecentActionsController.swift +++ b/TelegramUI/ChatRecentActionsController.swift @@ -26,7 +26,7 @@ final class ChatRecentActionsController: ViewController { self.titleView = ChatRecentActionsTitleView(color: self.presentationData.theme.rootController.navigationBar.primaryTextColor) - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/ChatRecordingPreviewInputPanelNode.swift b/TelegramUI/ChatRecordingPreviewInputPanelNode.swift index 2b8773741f..42bda39ca1 100644 --- a/TelegramUI/ChatRecordingPreviewInputPanelNode.swift +++ b/TelegramUI/ChatRecordingPreviewInputPanelNode.swift @@ -94,7 +94,7 @@ final class ChatRecordingPreviewInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.deleteChat() } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { var updateWaveform = false if self.presentationInterfaceState?.recordedMediaPreview != interfaceState.recordedMediaPreview { @@ -110,7 +110,7 @@ final class ChatRecordingPreviewInputPanelNode: ChatInputPanelNode { self.mediaPlayer?.pause() } if let account = self.account { - let mediaPlayer = MediaPlayer(audioSessionManager: account.telegramApplicationContext.mediaManager.audioSession, postbox: account.postbox, resource: recordedMediaPreview.resource, streamable: false, video: 
false, preferSoftwareDecoding: false, enableSound: true) + let mediaPlayer = MediaPlayer(audioSessionManager: account.telegramApplicationContext.mediaManager.audioSession, postbox: account.postbox, resource: recordedMediaPreview.resource, streamable: false, video: false, preferSoftwareDecoding: false, enableSound: true, fetchAutomatically: true) self.mediaPlayer = mediaPlayer self.durationLabel.defaultDuration = Double(recordedMediaPreview.duration) self.durationLabel.status = mediaPlayer.status @@ -157,7 +157,7 @@ final class ChatRecordingPreviewInputPanelNode: ChatInputPanelNode { self.mediaPlayer?.togglePlayPause() } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/ChatSearchInputPanelNode.swift b/TelegramUI/ChatSearchInputPanelNode.swift index ca12126a45..d5b07efded 100644 --- a/TelegramUI/ChatSearchInputPanelNode.swift +++ b/TelegramUI/ChatSearchInputPanelNode.swift @@ -87,7 +87,7 @@ final class ChatSearchInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.toggleMembersSearch(true) } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { let themeUpdated = self.presentationInterfaceState?.theme !== interfaceState.theme @@ -153,7 +153,7 @@ final class ChatSearchInputPanelNode: ChatInputPanelNode { return panelHeight } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override 
func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/ChatTextInputMediaRecordingButton.swift b/TelegramUI/ChatTextInputMediaRecordingButton.swift index 329a4c9b23..7066f1b331 100644 --- a/TelegramUI/ChatTextInputMediaRecordingButton.swift +++ b/TelegramUI/ChatTextInputMediaRecordingButton.swift @@ -128,7 +128,7 @@ private final class ChatTextInputMediaRecordingButtonPresenter : NSObject, TGMod } else { var presentNow = false if self.presentationController == nil { - let presentationController = ChatTextInputMediaRecordingButtonPresenterController(navigationBarTheme: nil) + let presentationController = ChatTextInputMediaRecordingButtonPresenterController(navigationBarPresentationData: nil) presentationController.statusBar.statusBarStyle = .Ignore self.presentationController = presentationController presentNow = true diff --git a/TelegramUI/ChatTextInputPanelNode.swift b/TelegramUI/ChatTextInputPanelNode.swift index 27bae6a9ea..b0a5aed7bf 100644 --- a/TelegramUI/ChatTextInputPanelNode.swift +++ b/TelegramUI/ChatTextInputPanelNode.swift @@ -97,17 +97,19 @@ private final class AccessoryItemIconButton: HighlightableButton { } } -private func cauclulateTextFieldMinHeight(_ presentationInterfaceState: ChatPresentationInterfaceState) -> CGFloat { +private func calclulateTextFieldMinHeight(_ presentationInterfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { let baseFontSize = max(17.0, presentationInterfaceState.fontSize.baseDisplaySize) + let result: CGFloat if baseFontSize.isEqual(to: 17.0) { - return 33.0 + result = 33.0 } else if baseFontSize.isEqual(to: 19.0) { - return 35.0 + result = 35.0 } else if baseFontSize.isEqual(to: 21.0) { - return 38.0 + result = 38.0 } else { - return 33.0 + result = 33.0 } + return result } private var currentTextInputBackgroundImage: (UIColor, UIColor, CGFloat, UIImage)? 
@@ -158,7 +160,7 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { private var accessoryItemButtons: [(ChatTextInputAccessoryItem, AccessoryItemIconButton)] = [] - private var validLayout: (CGFloat, CGFloat, CGFloat, CGFloat)? + private var validLayout: (CGFloat, CGFloat, CGFloat, CGFloat, LayoutMetrics)? var displayAttachmentMenu: () -> Void = { } var sendMessage: () -> Void = { } @@ -245,7 +247,6 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { } } - private let textFieldInsets = UIEdgeInsets(top: 6.0, left: 42.0, bottom: 6.0, right: 42.0) private let textInputViewInternalInsets = UIEdgeInsets(top: 1.0, left: 13.0, bottom: 1.0, right: 13.0) private let textInputViewRealInsets = UIEdgeInsets(top: 5.5, left: 0.0, bottom: 6.5, right: 0.0) private let accessoryButtonSpacing: CGFloat = 0.0 @@ -296,8 +297,8 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { } self.actionButtons.micButton.offsetRecordingControls = { [weak self] in if let strongSelf = self, let presentationInterfaceState = strongSelf.presentationInterfaceState { - if let (width, leftInset, rightInset, maxHeight) = strongSelf.validLayout { - let _ = strongSelf.updateLayout(width: width, leftInset: leftInset, rightInset: rightInset, maxHeight: maxHeight, transition: .immediate, interfaceState: presentationInterfaceState) + if let (width, leftInset, rightInset, maxHeight, metrics) = strongSelf.validLayout { + let _ = strongSelf.updateLayout(width: width, leftInset: leftInset, rightInset: rightInset, maxHeight: maxHeight, transition: .immediate, interfaceState: presentationInterfaceState, metrics: metrics) } } } @@ -421,15 +422,18 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { textInputNode.view.addGestureRecognizer(recognizer) } - private func textFieldMaxHeight(_ maxHeight: CGFloat) -> CGFloat { - return max(33.0, maxHeight - (self.textFieldInsets.top + 
self.textFieldInsets.bottom + self.textInputViewInternalInsets.top + self.textInputViewInternalInsets.bottom)) + private func textFieldMaxHeight(_ maxHeight: CGFloat, metrics: LayoutMetrics) -> CGFloat { + let textFieldInsets = self.textFieldInsets(metrics: metrics) + return max(33.0, maxHeight - (textFieldInsets.top + textFieldInsets.bottom + self.textInputViewInternalInsets.top + self.textInputViewInternalInsets.bottom)) } - private func calculateTextFieldMetrics(width: CGFloat, maxHeight: CGFloat) -> (accessoryButtonsWidth: CGFloat, textFieldHeight: CGFloat) { + private func calculateTextFieldMetrics(width: CGFloat, maxHeight: CGFloat, metrics: LayoutMetrics) -> (accessoryButtonsWidth: CGFloat, textFieldHeight: CGFloat) { let accessoryButtonInset = self.accessoryButtonInset let accessoryButtonSpacing = self.accessoryButtonSpacing - let fieldMaxHeight = textFieldMaxHeight(maxHeight) + let textFieldInsets = self.textFieldInsets(metrics: metrics) + + let fieldMaxHeight = textFieldMaxHeight(maxHeight, metrics: metrics) var accessoryButtonsWidth: CGFloat = 0.0 var firstButton = true @@ -445,12 +449,12 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { var textFieldMinHeight: CGFloat = 35.0 if let presentationInterfaceState = self.presentationInterfaceState { - textFieldMinHeight = cauclulateTextFieldMinHeight(presentationInterfaceState) + textFieldMinHeight = calclulateTextFieldMinHeight(presentationInterfaceState, metrics: metrics) } let textFieldHeight: CGFloat if let textInputNode = self.textInputNode { - let unboundTextFieldHeight = max(textFieldMinHeight, ceil(textInputNode.measure(CGSize(width: width - self.textFieldInsets.left - self.textFieldInsets.right - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right - accessoryButtonsWidth, height: CGFloat.greatestFiniteMagnitude)).height)) + let unboundTextFieldHeight = max(textFieldMinHeight, ceil(textInputNode.measure(CGSize(width: width - 
textFieldInsets.left - textFieldInsets.right - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right - accessoryButtonsWidth, height: CGFloat.greatestFiniteMagnitude)).height)) let maxNumberOfLines = min(12, (Int(fieldMaxHeight - 11.0) - 33) / 22) @@ -464,18 +468,29 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { return (accessoryButtonsWidth, textFieldHeight) } - private func panelHeight(textFieldHeight: CGFloat) -> CGFloat { - return textFieldHeight + self.textFieldInsets.top + self.textFieldInsets.bottom + self.textInputViewInternalInsets.top + self.textInputViewInternalInsets.bottom + private func textFieldInsets(metrics: LayoutMetrics) -> UIEdgeInsets { + var insets = UIEdgeInsets(top: 6.0, left: 42.0, bottom: 6.0, right: 42.0) + if case .regular = metrics.widthClass, case .regular = metrics.heightClass { + insets.top += 1.0 + insets.bottom += 1.0 + } + return insets } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { - let textFieldMinHeight = cauclulateTextFieldMinHeight(interfaceState) + private func panelHeight(textFieldHeight: CGFloat, metrics: LayoutMetrics) -> CGFloat { + let textFieldInsets = self.textFieldInsets(metrics: metrics) + let result = textFieldHeight + textFieldInsets.top + textFieldInsets.bottom + self.textInputViewInternalInsets.top + self.textInputViewInternalInsets.bottom + return result + } + + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { + let textFieldMinHeight = calclulateTextFieldMinHeight(interfaceState, metrics: metrics) let minimalHeight: CGFloat = 14.0 + textFieldMinHeight return minimalHeight } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { - self.validLayout = (width, leftInset, rightInset, maxHeight) 
+ override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { + self.validLayout = (width, leftInset, rightInset, maxHeight, metrics) let baseWidth = width - leftInset - rightInset if self.presentationInterfaceState != interfaceState { let previousState = self.presentationInterfaceState @@ -520,7 +535,7 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { self.actionButtons.updateTheme(theme: interfaceState.theme) - let textFieldMinHeight = cauclulateTextFieldMinHeight(interfaceState) + let textFieldMinHeight = calclulateTextFieldMinHeight(interfaceState, metrics: metrics) let minimalInputHeight: CGFloat = 2.0 + textFieldMinHeight self.textInputBackgroundView.image = textInputBackgroundImage(backgroundColor: interfaceState.theme.chat.inputPanel.panelBackgroundColor, strokeColor: interfaceState.theme.chat.inputPanel.inputStrokeColor, diameter: minimalInputHeight) @@ -589,7 +604,7 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { var textFieldMinHeight: CGFloat = 33.0 if let presentationInterfaceState = self.presentationInterfaceState { - textFieldMinHeight = cauclulateTextFieldMinHeight(presentationInterfaceState) + textFieldMinHeight = calclulateTextFieldMinHeight(presentationInterfaceState, metrics: metrics) } let minimalHeight: CGFloat = 14.0 + textFieldMinHeight let minimalInputHeight: CGFloat = 2.0 + textFieldMinHeight @@ -643,8 +658,8 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { self.accessoryItemButtons = updatedButtons } - let (accessoryButtonsWidth, textFieldHeight) = self.calculateTextFieldMetrics(width: baseWidth, maxHeight: maxHeight) - let panelHeight = self.panelHeight(textFieldHeight: textFieldHeight) + let (accessoryButtonsWidth, textFieldHeight) = self.calculateTextFieldMetrics(width: 
baseWidth, maxHeight: maxHeight, metrics: metrics) + let panelHeight = self.panelHeight(textFieldHeight: textFieldHeight, metrics: metrics) self.actionButtons.micButton.updateMode(mode: interfaceState.interfaceState.mediaRecordingMode, animated: transition.isAnimated) @@ -819,12 +834,13 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { } let searchLayoutClearButtonSize = CGSize(width: 44.0, height: minimalHeight) - transition.updateFrame(layer: self.searchLayoutClearButton.layer, frame: CGRect(origin: CGPoint(x: width - rightInset - self.textFieldInsets.left - self.textFieldInsets.right + textInputBackgroundWidthOffset + 3.0, y: panelHeight - minimalHeight), size: searchLayoutClearButtonSize)) + let textFieldInsets = self.textFieldInsets(metrics: metrics) + transition.updateFrame(layer: self.searchLayoutClearButton.layer, frame: CGRect(origin: CGPoint(x: width - rightInset - textFieldInsets.left - textFieldInsets.right + textInputBackgroundWidthOffset + 3.0, y: panelHeight - minimalHeight), size: searchLayoutClearButtonSize)) let searchProgressSize = self.searchLayoutProgressView.bounds.size transition.updateFrame(layer: self.searchLayoutProgressView.layer, frame: CGRect(origin: CGPoint(x: floor((searchLayoutClearButtonSize.width - searchProgressSize.width) / 2.0), y: floor((searchLayoutClearButtonSize.height - searchProgressSize.height) / 2.0)), size: searchProgressSize)) - let textInputFrame = CGRect(x: leftInset + self.textFieldInsets.left, y: self.textFieldInsets.top + audioRecordingItemsVerticalOffset, width: baseWidth - self.textFieldInsets.left - self.textFieldInsets.right + textInputBackgroundWidthOffset, height: panelHeight - self.textFieldInsets.top - self.textFieldInsets.bottom) + let textInputFrame = CGRect(x: leftInset + textFieldInsets.left, y: textFieldInsets.top + audioRecordingItemsVerticalOffset, width: baseWidth - textFieldInsets.left - textFieldInsets.right + textInputBackgroundWidthOffset, height: panelHeight - 
textFieldInsets.top - textFieldInsets.bottom) transition.updateFrame(node: self.textInputContainer, frame: textInputFrame) if let textInputNode = self.textInputNode { @@ -838,7 +854,7 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { if let contextPlaceholder = interfaceState.inputTextPanelState.contextPlaceholder { let placeholderLayout = TextNode.asyncLayout(self.contextPlaceholderNode) - let (placeholderSize, placeholderApply) = placeholderLayout(TextNodeLayoutArguments(attributedString: contextPlaceholder, backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: width - leftInset - rightInset - self.textFieldInsets.left - self.textFieldInsets.right - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right - accessoryButtonsWidth, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets())) + let (placeholderSize, placeholderApply) = placeholderLayout(TextNodeLayoutArguments(attributedString: contextPlaceholder, backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: width - leftInset - rightInset - textFieldInsets.left - textFieldInsets.right - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right - accessoryButtonsWidth, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets())) let contextPlaceholderNode = placeholderApply() if let currentContextPlaceholderNode = self.contextPlaceholderNode, currentContextPlaceholderNode !== contextPlaceholderNode { self.contextPlaceholderNode = nil @@ -854,7 +870,7 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { let _ = placeholderApply() - contextPlaceholderNode.frame = CGRect(origin: CGPoint(x: leftInset + self.textFieldInsets.left + self.textInputViewInternalInsets.left, y: self.textFieldInsets.top + self.textInputViewInternalInsets.top + 
self.textInputViewRealInsets.top + audioRecordingItemsVerticalOffset + UIScreenPixel), size: placeholderSize.size) + contextPlaceholderNode.frame = CGRect(origin: CGPoint(x: leftInset + textFieldInsets.left + self.textInputViewInternalInsets.left, y: textFieldInsets.top + self.textInputViewInternalInsets.top + self.textInputViewRealInsets.top + audioRecordingItemsVerticalOffset + UIScreenPixel), size: placeholderSize.size) self.textPlaceholderNode.isHidden = true } else if let contextPlaceholderNode = self.contextPlaceholderNode { @@ -869,11 +885,11 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { self.textPlaceholderNode.isHidden = hasText } - transition.updateFrame(node: self.textPlaceholderNode, frame: CGRect(origin: CGPoint(x: leftInset + self.textFieldInsets.left + self.textInputViewInternalInsets.left, y: self.textFieldInsets.top + self.textInputViewInternalInsets.top + self.textInputViewRealInsets.top + audioRecordingItemsVerticalOffset + UIScreenPixel), size: self.textPlaceholderNode.frame.size)) + transition.updateFrame(node: self.textPlaceholderNode, frame: CGRect(origin: CGPoint(x: leftInset + textFieldInsets.left + self.textInputViewInternalInsets.left, y: textFieldInsets.top + self.textInputViewInternalInsets.top + self.textInputViewRealInsets.top + audioRecordingItemsVerticalOffset + UIScreenPixel), size: self.textPlaceholderNode.frame.size)) - transition.updateFrame(layer: self.textInputBackgroundView.layer, frame: CGRect(x: leftInset + self.textFieldInsets.left, y: self.textFieldInsets.top + audioRecordingItemsVerticalOffset, width: baseWidth - self.textFieldInsets.left - self.textFieldInsets.right + textInputBackgroundWidthOffset, height: panelHeight - self.textFieldInsets.top - self.textFieldInsets.bottom)) + transition.updateFrame(layer: self.textInputBackgroundView.layer, frame: CGRect(x: leftInset + textFieldInsets.left, y: textFieldInsets.top + audioRecordingItemsVerticalOffset, width: baseWidth - 
textFieldInsets.left - textFieldInsets.right + textInputBackgroundWidthOffset, height: panelHeight - textFieldInsets.top - textFieldInsets.bottom)) - var nextButtonTopRight = CGPoint(x: width - rightInset - self.textFieldInsets.right - accessoryButtonInset, y: panelHeight - self.textFieldInsets.bottom - minimalInputHeight + audioRecordingItemsVerticalOffset) + var nextButtonTopRight = CGPoint(x: width - rightInset - textFieldInsets.right - accessoryButtonInset, y: panelHeight - textFieldInsets.bottom - minimalInputHeight + audioRecordingItemsVerticalOffset) for (_, button) in self.accessoryItemButtons.reversed() { let buttonSize = CGSize(width: button.buttonWidth, height: minimalInputHeight) let buttonFrame = CGRect(origin: CGPoint(x: nextButtonTopRight.x - buttonSize.width, y: nextButtonTopRight.y + floor((minimalInputHeight - buttonSize.height) / 2.0)), size: buttonSize) @@ -894,7 +910,7 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { if let removeAccessoryButtons = removeAccessoryButtons { for button in removeAccessoryButtons { - let buttonFrame = CGRect(origin: CGPoint(x: button.frame.origin.x, y: panelHeight - self.textFieldInsets.bottom - minimalInputHeight), size: button.frame.size) + let buttonFrame = CGRect(origin: CGPoint(x: button.frame.origin.x, y: panelHeight - textFieldInsets.bottom - minimalInputHeight), size: button.frame.size) transition.updateFrame(layer: button.layer, frame: buttonFrame) button.layer.animateScale(from: 1.0, to: 0.2, duration: 0.25, removeOnCompletion: false) button.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.25, removeOnCompletion: false, completion: { [weak button] _ in @@ -1070,9 +1086,9 @@ class ChatTextInputPanelNode: ChatInputPanelNode, ASEditableTextNodeDelegate { } private func updateTextHeight() { - if let (width, leftInset, rightInset, maxHeight) = self.validLayout { - let (_, textFieldHeight) = self.calculateTextFieldMetrics(width: width - leftInset - rightInset, maxHeight: 
maxHeight) - let panelHeight = self.panelHeight(textFieldHeight: textFieldHeight) + if let (width, leftInset, rightInset, maxHeight, metrics) = self.validLayout { + let (_, textFieldHeight) = self.calculateTextFieldMetrics(width: width - leftInset - rightInset, maxHeight: maxHeight, metrics: metrics) + let panelHeight = self.panelHeight(textFieldHeight: textFieldHeight, metrics: metrics) if !self.bounds.size.height.isEqual(to: panelHeight) { self.updateHeight() } diff --git a/TelegramUI/ChatTitleView.swift b/TelegramUI/ChatTitleView.swift index 1fb3e795a6..e0b73b6076 100644 --- a/TelegramUI/ChatTitleView.swift +++ b/TelegramUI/ChatTitleView.swift @@ -82,6 +82,8 @@ final class ChatTitleView: UIView, NavigationBarTitleView { private var typingIndicator: TGModernConversationTitleActivityIndicator? private let button: HighlightTrackingButtonNode + private var validLayout: (CGSize, CGRect)? + private var titleLeftIcon: ChatTitleIcon = .none private var titleRightIcon: ChatTitleIcon = .none @@ -227,7 +229,7 @@ final class ChatTitleView: UIView, NavigationBarTitleView { if peerView.peerId == self.account.peerId { string = NSAttributedString(string: self.strings.Conversation_SavedMessages, font: Font.medium(17.0), textColor: self.theme.rootController.navigationBar.primaryTextColor) } else { - string = NSAttributedString(string: peer.displayTitle, font: Font.medium(17.0), textColor: self.theme.rootController.navigationBar.primaryTextColor) + string = NSAttributedString(string: peer.displayTitle(or: self.strings.Peer_DeletedUser), font: Font.medium(17.0), textColor: self.theme.rootController.navigationBar.primaryTextColor) } } if peerView.peerId.namespace == Namespaces.Peer.SecretChat { @@ -304,7 +306,7 @@ final class ChatTitleView: UIView, NavigationBarTitleView { self.presenceManager?.reset(presence: presence) } else { - let string = NSAttributedString(string: strings.LastSeen_ALongTimeAgo, font: Font.regular(13.0), textColor: 
self.theme.rootController.navigationBar.secondaryTextColor) + let string = NSAttributedString(string: "", font: Font.regular(13.0), textColor: self.theme.rootController.navigationBar.secondaryTextColor) if self.infoNode.attributedText == nil || !self.infoNode.attributedText!.isEqual(to: string) { self.infoNode.attributedText = string shouldUpdateLayout = true @@ -456,11 +458,18 @@ final class ChatTitleView: UIView, NavigationBarTitleView { override func layoutSubviews() { super.layoutSubviews() - let size = self.bounds.size + if let (size, clearBounds) = self.validLayout { + self.updateLayout(size: size, clearBounds: clearBounds, transition: .immediate) + } + } + + func updateLayout(size: CGSize, clearBounds: CGRect, transition: ContainedViewLayoutTransition) { + self.validLayout = (size, clearBounds) + let transition: ContainedViewLayoutTransition = .immediate - self.button.frame = CGRect(origin: CGPoint(), size: size) - self.contentContainer.frame = CGRect(origin: CGPoint(), size: size) + self.button.frame = clearBounds + self.contentContainer.frame = clearBounds//CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: size) var leftIconWidth: CGFloat = 0.0 var rightIconWidth: CGFloat = 0.0 @@ -484,24 +493,34 @@ final class ChatTitleView: UIView, NavigationBarTitleView { } if size.height > 40.0 { - let titleSize = self.titleNode.measure(CGSize(width: size.width - leftIconWidth - rightIconWidth, height: size.height)) - let infoSize = self.infoNode.measure(size) - let typingSize = self.typingNode.measure(size) + let titleSize = self.titleNode.measure(CGSize(width: clearBounds.width - leftIconWidth - rightIconWidth, height: size.height)) + let infoSize = self.infoNode.measure(clearBounds.size) + let typingSize = self.typingNode.measure(clearBounds.size) let titleInfoSpacing: CGFloat = 0.0 - let titleFrame: CGRect + var titleFrame: CGRect if infoSize.width.isZero && typingSize.width.isZero { - titleFrame = CGRect(origin: CGPoint(x: floor((size.width - titleSize.width) / 2.0), 
y: floor((size.height - titleSize.height) / 2.0)), size: titleSize) + titleFrame = CGRect(origin: CGPoint(x: floor((clearBounds.width - titleSize.width) / 2.0), y: floor((size.height - titleSize.height) / 2.0)), size: titleSize) + if titleFrame.size.width < size.width { + titleFrame.origin.x = -clearBounds.minX + floor((size.width - titleFrame.width) / 2.0) + } self.titleNode.frame = titleFrame } else { let combinedHeight = titleSize.height + infoSize.height + titleInfoSpacing - titleFrame = CGRect(origin: CGPoint(x: floor((size.width - titleSize.width) / 2.0), y: floor((size.height - combinedHeight) / 2.0)), size: titleSize) + titleFrame = CGRect(origin: CGPoint(x: floor((clearBounds.width - titleSize.width) / 2.0), y: floor((size.height - combinedHeight) / 2.0)), size: titleSize) + if titleFrame.size.width < size.width { + titleFrame.origin.x = -clearBounds.minX + floor((size.width - titleFrame.width) / 2.0) + } self.titleNode.frame = titleFrame - self.infoNode.frame = CGRect(origin: CGPoint(x: floor((size.width - infoSize.width) / 2.0), y: floor((size.height - combinedHeight) / 2.0) + titleSize.height + titleInfoSpacing), size: infoSize) - self.typingNode.frame = CGRect(origin: CGPoint(x: floor((size.width - typingSize.width + 14.0) / 2.0), y: floor((size.height - combinedHeight) / 2.0) + titleSize.height + titleInfoSpacing), size: typingSize) + var infoFrame = CGRect(origin: CGPoint(x: floor((clearBounds.width - infoSize.width) / 2.0), y: floor((size.height - combinedHeight) / 2.0) + titleSize.height + titleInfoSpacing), size: infoSize) + if infoFrame.size.width < size.width { + infoFrame.origin.x = -clearBounds.minX + floor((size.width - infoFrame.width) / 2.0) + } + self.infoNode.frame = infoFrame + self.typingNode.frame = CGRect(origin: CGPoint(x: floor((clearBounds.width - typingSize.width + 14.0) / 2.0), y: floor((size.height - combinedHeight) / 2.0) + titleSize.height + titleInfoSpacing), size: typingSize) if let typingIndicator = self.typingIndicator { 
typingIndicator.frame = CGRect(x: self.typingNode.frame.origin.x - 24.0, y: self.typingNode.frame.origin.y, width: 24.0, height: 16.0) @@ -515,17 +534,17 @@ final class ChatTitleView: UIView, NavigationBarTitleView { self.titleRightIconNode.frame = CGRect(origin: CGPoint(x: titleFrame.maxX + 3.0, y: titleFrame.minY + 7.0), size: image.size) } } else { - let titleSize = self.titleNode.measure(CGSize(width: floor(size.width / 2.0 - leftIconWidth - rightIconWidth), height: size.height)) - let infoSize = self.infoNode.measure(CGSize(width: floor(size.width / 2.0), height: size.height)) - let typingSize = self.typingNode.measure(CGSize(width: floor(size.width / 2.0), height: size.height)) + let titleSize = self.titleNode.measure(CGSize(width: floor(clearBounds.width / 2.0 - leftIconWidth - rightIconWidth), height: size.height)) + let infoSize = self.infoNode.measure(CGSize(width: floor(clearBounds.width / 2.0), height: size.height)) + let typingSize = self.typingNode.measure(CGSize(width: floor(clearBounds.width / 2.0), height: size.height)) let titleInfoSpacing: CGFloat = 8.0 let combinedWidth = titleSize.width + leftIconWidth + rightIconWidth + infoSize.width + titleInfoSpacing - let titleFrame = CGRect(origin: CGPoint(x: leftIconWidth + floor((size.width - combinedWidth) / 2.0), y: floor((size.height - titleSize.height) / 2.0)), size: titleSize) + let titleFrame = CGRect(origin: CGPoint(x: leftIconWidth + floor((clearBounds.width - combinedWidth) / 2.0), y: floor((size.height - titleSize.height) / 2.0)), size: titleSize) self.titleNode.frame = titleFrame - self.infoNode.frame = CGRect(origin: CGPoint(x: floor((size.width - combinedWidth) / 2.0 + titleSize.width + leftIconWidth + rightIconWidth + titleInfoSpacing), y: floor((size.height - infoSize.height) / 2.0)), size: infoSize) - self.typingNode.frame = CGRect(origin: CGPoint(x: floor((size.width - combinedWidth) / 2.0 + titleSize.width + leftIconWidth + rightIconWidth + titleInfoSpacing), y: floor((size.height - 
typingSize.height) / 2.0)), size: typingSize) + self.infoNode.frame = CGRect(origin: CGPoint(x: floor((clearBounds.width - combinedWidth) / 2.0 + titleSize.width + leftIconWidth + rightIconWidth + titleInfoSpacing), y: floor((size.height - infoSize.height) / 2.0)), size: infoSize) + self.typingNode.frame = CGRect(origin: CGPoint(x: floor((clearBounds.width - combinedWidth) / 2.0 + titleSize.width + leftIconWidth + rightIconWidth + titleInfoSpacing), y: floor((size.height - typingSize.height) / 2.0)), size: typingSize) if let image = self.titleLeftIconNode.image { self.titleLeftIconNode.frame = CGRect(origin: CGPoint(x: titleFrame.minX, y: titleFrame.minY + 4.0), size: image.size) diff --git a/TelegramUI/ChatUnblockInputPanelNode.swift b/TelegramUI/ChatUnblockInputPanelNode.swift index d97216742c..8ebfbbdbbe 100644 --- a/TelegramUI/ChatUnblockInputPanelNode.swift +++ b/TelegramUI/ChatUnblockInputPanelNode.swift @@ -80,7 +80,7 @@ final class ChatUnblockInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.unblockPeer() } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { self.presentationInterfaceState = interfaceState } @@ -97,7 +97,7 @@ final class ChatUnblockInputPanelNode: ChatInputPanelNode { return 47.0 } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/ComponentsThemes.swift b/TelegramUI/ComponentsThemes.swift index 
54a25c2fbc..c3a4afbe1e 100644 --- a/TelegramUI/ComponentsThemes.swift +++ b/TelegramUI/ComponentsThemes.swift @@ -15,6 +15,18 @@ public extension NavigationBarTheme { } } +public extension NavigationBarStrings { + public convenience init(presentationStrings: PresentationStrings) { + self.init(back: presentationStrings.Common_Back, close: presentationStrings.Common_Close) + } +} + +public extension NavigationBarPresentationData { + public convenience init(presentationData: PresentationData) { + self.init(theme: NavigationBarTheme(rootControllerTheme: presentationData.theme), strings: NavigationBarStrings(presentationStrings: presentationData.strings)) + } +} + extension ActionSheetControllerTheme { convenience init(presentationTheme: PresentationTheme) { let actionSheet = presentationTheme.actionSheet @@ -45,3 +57,9 @@ extension PeekControllerTheme { self.init(isDark: actionSheet.backgroundType == .dark, menuBackgroundColor: actionSheet.opaqueItemBackgroundColor, menuItemHighligtedColor: actionSheet.opaqueItemHighlightedBackgroundColor, menuItemSeparatorColor: actionSheet.opaqueItemSeparatorColor, accentColor: actionSheet.controlAccentColor, destructiveColor: actionSheet.destructiveActionTextColor) } } + +public extension NavigationControllerTheme { + convenience init(presentationTheme: PresentationTheme) { + self.init(navigationBar: NavigationBarTheme(rootControllerTheme: presentationTheme), emptyAreaColor: presentationTheme.chatList.backgroundColor, emptyDetailIcon: generateTintedImage(image: UIImage(bundleImageName: "Chat List/EmptyMasterDetailIcon"), color: presentationTheme.chatList.messageTextColor.withAlphaComponent(0.2))) + } +} diff --git a/TelegramUI/ComposeController.swift b/TelegramUI/ComposeController.swift index fea13d7c19..d8c2c0fd5d 100644 --- a/TelegramUI/ComposeController.swift +++ b/TelegramUI/ComposeController.swift @@ -29,7 +29,7 @@ public class ComposeController: ViewController { self.presentationData = 
account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -69,7 +69,7 @@ public class ComposeController: ViewController { private func updateThemeAndStrings() { self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) self.title = self.presentationData.strings.Compose_NewMessage self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) } diff --git a/TelegramUI/ContactListNode.swift b/TelegramUI/ContactListNode.swift index b611783dd3..9114cc0e1f 100644 --- a/TelegramUI/ContactListNode.swift +++ b/TelegramUI/ContactListNode.swift @@ -419,7 +419,9 @@ final class ContactListNode: ASDisplayNode { if value != self.enableUpdatesValue { self.enableUpdatesValue = value if value { - self.contactPeersViewPromise.set(self.account.postbox.contactPeersView(accountPeerId: self.account.peerId, includePresences: true)) + self.contactPeersViewPromise.set(self.account.postbox.contactPeersView(accountPeerId: self.account.peerId, includePresences: true) |> mapToThrottled { next -> Signal in + return .single(next) |> then(.complete() |> delay(5.0, queue: Queue.concurrentDefaultQueue())) + }) } else { self.contactPeersViewPromise.set(self.account.postbox.contactPeersView(accountPeerId: self.account.peerId, includePresences: true) |> take(1)) } diff --git a/TelegramUI/ContactMultiselectionController.swift 
b/TelegramUI/ContactMultiselectionController.swift index 9a432a110f..8b3ffc2b02 100644 --- a/TelegramUI/ContactMultiselectionController.swift +++ b/TelegramUI/ContactMultiselectionController.swift @@ -52,7 +52,7 @@ public class ContactMultiselectionController: ViewController { self.titleView = CounterContollerTitleView(theme: self.presentationData.theme) - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -103,7 +103,7 @@ public class ContactMultiselectionController: ViewController { private func updateThemeAndStrings() { self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) self.updateTitle() } diff --git a/TelegramUI/ContactSelectionController.swift b/TelegramUI/ContactSelectionController.swift index 2924e42cb6..a8f3482598 100644 --- a/TelegramUI/ContactSelectionController.swift +++ b/TelegramUI/ContactSelectionController.swift @@ -54,7 +54,7 @@ public class ContactSelectionController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = 
self.presentationData.theme.rootController.statusBar.style.style @@ -94,7 +94,7 @@ public class ContactSelectionController: ViewController { private func updateThemeAndStrings() { self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) self.title = self.titleProducer(self.presentationData.strings) self.tabBarItem.title = self.presentationData.strings.Contacts_Title self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) diff --git a/TelegramUI/ContactsController.swift b/TelegramUI/ContactsController.swift index c8ff48d7b1..2bdc349437 100644 --- a/TelegramUI/ContactsController.swift +++ b/TelegramUI/ContactsController.swift @@ -27,7 +27,7 @@ public class ContactsController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -70,7 +70,7 @@ public class ContactsController: ViewController { private func updateThemeAndStrings() { self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) self.title = self.presentationData.strings.Contacts_Title self.tabBarItem.title = 
self.presentationData.strings.Contacts_Title self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) diff --git a/TelegramUI/DeleteChatInputPanelNode.swift b/TelegramUI/DeleteChatInputPanelNode.swift index 4dd9e6a0e9..71f9e99b09 100644 --- a/TelegramUI/DeleteChatInputPanelNode.swift +++ b/TelegramUI/DeleteChatInputPanelNode.swift @@ -33,7 +33,7 @@ final class DeleteChatInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.deleteChat() } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { self.presentationInterfaceState = interfaceState @@ -49,7 +49,7 @@ final class DeleteChatInputPanelNode: ChatInputPanelNode { return panelHeight } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/EditAccessoryPanelNode.swift b/TelegramUI/EditAccessoryPanelNode.swift index ec0a30e191..ace76a4904 100644 --- a/TelegramUI/EditAccessoryPanelNode.swift +++ b/TelegramUI/EditAccessoryPanelNode.swift @@ -86,7 +86,7 @@ final class EditAccessoryPanelNode: AccessoryPanelNode { if let strongSelf = self { var text = "" if let message = message { - text = descriptionStringForMessage(message, strings: strings, accountPeerId: account.peerId) + (text, _) = descriptionStringForMessage(message, strings: strings, accountPeerId: account.peerId) } var updatedMedia: Media? 
diff --git a/TelegramUI/ExternalMusicAlbumArtResources.swift b/TelegramUI/ExternalMusicAlbumArtResources.swift index 75aa990fe5..af02de54a1 100644 --- a/TelegramUI/ExternalMusicAlbumArtResources.swift +++ b/TelegramUI/ExternalMusicAlbumArtResources.swift @@ -5,7 +5,9 @@ import Postbox private func urlEncodedStringFromString(_ string: String) -> String { var nsString: NSString = string as NSString - nsString = nsString.replacingPercentEscapes(using: String.Encoding.utf8.rawValue)! as NSString + if let value = nsString.replacingPercentEscapes(using: String.Encoding.utf8.rawValue) { + nsString = value as NSString + } let result = CFURLCreateStringByAddingPercentEscapes(nil, nsString as CFString, nil, "?!@#$^&%*+=,:;'\"`<>()[]{}/\\|~ " as CFString, CFStringConvertNSStringEncodingToEncoding(String.Encoding.utf8.rawValue))! return result as String diff --git a/TelegramUI/FFMpegMediaFrameSource.swift b/TelegramUI/FFMpegMediaFrameSource.swift index f5cce54a31..89db572226 100644 --- a/TelegramUI/FFMpegMediaFrameSource.swift +++ b/TelegramUI/FFMpegMediaFrameSource.swift @@ -72,6 +72,7 @@ final class FFMpegMediaFrameSource: NSObject, MediaFrameSource { private let streamable: Bool private let video: Bool private let preferSoftwareDecoding: Bool + private let fetchAutomatically: Bool private let taskQueue: ThreadTaskQueue private let thread: Thread @@ -90,13 +91,14 @@ final class FFMpegMediaFrameSource: NSObject, MediaFrameSource { } } - init(queue: Queue, postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool) { + init(queue: Queue, postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool, fetchAutomatically: Bool) { self.queue = queue self.postbox = postbox self.resource = resource self.streamable = streamable self.video = video self.preferSoftwareDecoding = preferSoftwareDecoding + self.fetchAutomatically = fetchAutomatically self.taskQueue = ThreadTaskQueue() @@ -148,9 +150,10 @@ 
final class FFMpegMediaFrameSource: NSObject, MediaFrameSource { let streamable = self.streamable let video = self.video let preferSoftwareDecoding = self.preferSoftwareDecoding + let fetchAutomatically = self.fetchAutomatically self.performWithContext { [weak self] context in - context.initializeState(postbox: postbox, resource: resource, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding) + context.initializeState(postbox: postbox, resource: resource, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding, fetchAutomatically: fetchAutomatically) let (frames, endOfStream) = context.takeFrames(until: timestamp) @@ -195,9 +198,10 @@ final class FFMpegMediaFrameSource: NSObject, MediaFrameSource { let streamable = self.streamable let video = self.video let preferSoftwareDecoding = self.preferSoftwareDecoding + let fetchAutomatically = self.fetchAutomatically self.performWithContext { [weak self] context in - context.initializeState(postbox: postbox, resource: resource, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding) + context.initializeState(postbox: postbox, resource: resource, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding, fetchAutomatically: fetchAutomatically) context.seek(timestamp: timestamp, completed: { streamDescriptions, timestamp in queue.async { diff --git a/TelegramUI/FFMpegMediaFrameSourceContext.swift b/TelegramUI/FFMpegMediaFrameSourceContext.swift index c3809443ac..7e0e126101 100644 --- a/TelegramUI/FFMpegMediaFrameSourceContext.swift +++ b/TelegramUI/FFMpegMediaFrameSourceContext.swift @@ -72,39 +72,41 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa var fetchedCount: Int32 = 0 - let resourceSize: Int = resource.size ?? 0 - - let readCount = min(resourceSize - context.readingOffset, Int(bufferSize)) var fetchedData: Data? 
if streamable { let data: Signal + let resourceSize: Int = resource.size ?? Int(Int32.max - 1) + let readCount = min(resourceSize - context.readingOffset, Int(bufferSize)) data = postbox.mediaBox.resourceData(resource, size: resourceSize, in: context.readingOffset ..< (context.readingOffset + readCount), mode: .complete) let semaphore = DispatchSemaphore(value: 0) if readCount == 0 { fetchedData = Data() } else { - let _ = data.start(next: { data in + let disposable = data.start(next: { data in if data.count == readCount { fetchedData = data semaphore.signal() } }) semaphore.wait() + disposable.dispose() } } else { let data = postbox.mediaBox.resourceData(resource, pathExtension: nil, option: .complete(waitUntilFetchStatus: false)) - let range = context.readingOffset ..< (context.readingOffset + readCount) let semaphore = DispatchSemaphore(value: 0) - let _ = data.start(next: { next in + let disposable = data.start(next: { next in if next.complete { + let readCount = max(0, min(next.size - context.readingOffset, Int(bufferSize))) + let range = context.readingOffset ..< (context.readingOffset + readCount) + let fd = open(next.path, O_RDONLY, S_IRUSR) if fd >= 0 { lseek(fd, off_t(range.lowerBound), SEEK_SET) var data = Data(count: readCount) data.withUnsafeMutableBytes { (bytes: UnsafeMutablePointer) -> Void in let readBytes = read(fd, bytes, readCount) - assert(readBytes == readCount) + assert(readBytes <= readCount) } fetchedData = data close(fd) @@ -113,6 +115,7 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa } }) semaphore.wait() + disposable.dispose() } if let fetchedData = fetchedData { fetchedData.withUnsafeBytes { (bytes: UnsafePointer) -> Void in @@ -133,10 +136,30 @@ private func seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whe var result: Int64 = offset - let resourceSize: Int = resource.size ?? 
0 + let resourceSize: Int + if let size = resource.size { + resourceSize = size + } else { + if !streamable { + var resultSize: Int = Int(Int32.max - 1) + let data = postbox.mediaBox.resourceData(resource, pathExtension: nil, option: .complete(waitUntilFetchStatus: false)) + let semaphore = DispatchSemaphore(value: 0) + let disposable = data.start(next: { next in + if next.complete { + resultSize = Int(next.size) + semaphore.signal() + } + }) + semaphore.wait() + disposable.dispose() + resourceSize = resultSize + } else { + resourceSize = Int(Int32.max - 1) + } + } if (whence & AVSEEK_SIZE) != 0 { - result = Int64(resourceSize) + result = Int64(resourceSize == Int(Int32.max - 1) ? -1 : resourceSize) } else { context.readingOffset = Int(min(Int64(resourceSize), offset)) @@ -149,7 +172,7 @@ private func seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whe } else { if streamable { context.fetchedDataDisposable.set(postbox.mediaBox.fetchedResourceData(resource, in: context.readingOffset ..< resourceSize, tag: fetchTag).start()) - } else if !context.requestedCompleteFetch { + } else if !context.requestedCompleteFetch && context.fetchAutomatically { context.requestedCompleteFetch = true context.fetchedDataDisposable.set(postbox.mediaBox.fetchedResource(resource, tag: fetchTag).start()) } @@ -183,6 +206,7 @@ final class FFMpegMediaFrameSourceContext: NSObject { private var packetQueue: [FFMpegPacket] = [] private var preferSoftwareDecoding: Bool = false + fileprivate var fetchAutomatically: Bool = true init(thread: Thread) { self.thread = thread @@ -194,7 +218,7 @@ final class FFMpegMediaFrameSourceContext: NSObject { fetchedDataDisposable.dispose() } - func initializeState(postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool) { + func initializeState(postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool, fetchAutomatically: Bool) { if self.readingError || 
self.initializedState != nil { return } @@ -205,17 +229,18 @@ final class FFMpegMediaFrameSourceContext: NSObject { self.resource = resource self.streamable = streamable self.preferSoftwareDecoding = preferSoftwareDecoding + self.fetchAutomatically = fetchAutomatically if video { self.fetchTag = TelegramMediaResourceFetchTag(statsCategory: .video) } else { self.fetchTag = TelegramMediaResourceFetchTag(statsCategory: .audio) } - let resourceSize: Int = resource.size ?? 0 + let resourceSize: Int = resource.size ?? Int(Int32.max - 1) if streamable { self.fetchedDataDisposable.set(postbox.mediaBox.fetchedResourceData(resource, in: 0 ..< resourceSize, tag: self.fetchTag).start()) - } else if !self.requestedCompleteFetch { + } else if !self.requestedCompleteFetch && self.fetchAutomatically { self.requestedCompleteFetch = true self.fetchedDataDisposable.set(postbox.mediaBox.fetchedResource(resource, tag: self.fetchTag).start()) } @@ -287,7 +312,27 @@ final class FFMpegMediaFrameSourceContext: NSObject { } } } else if codecPar.pointee.codec_id == AV_CODEC_ID_H264 { - if let videoFormat = FFMpegMediaFrameSourceContextHelpers.createFormatDescriptionFromCodecData(UInt32(kCMVideoCodecType_H264), codecPar.pointee.width, codecPar.pointee.height, codecPar.pointee.extradata, codecPar.pointee.extradata_size, 0x43637661) { + if let videoFormat = FFMpegMediaFrameSourceContextHelpers.createFormatDescriptionFromAVCCodecData(UInt32(kCMVideoCodecType_H264), codecPar.pointee.width, codecPar.pointee.height, codecPar.pointee.extradata, codecPar.pointee.extradata_size) { + let (fps, timebase) = FFMpegMediaFrameSourceContextHelpers.streamFpsAndTimeBase(stream: avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!, defaultTimeBase: CMTimeMake(1, 1000)) + + let duration = CMTimeMake(avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.duration, timebase.timescale) + + var rotationAngle: Double = 0.0 + if let rotationInfo = 
av_dict_get(avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.metadata, "rotate", nil, 0), let value = rotationInfo.pointee.value { + if strcmp(value, "0") != 0 { + if let angle = Double(String(cString: value)) { + rotationAngle = angle * Double.pi / 180.0 + } + } + } + + let aspect = Double(codecPar.pointee.width) / Double(codecPar.pointee.height) + + videoStream = StreamContext(index: streamIndex, codecContext: nil, fps: fps, timebase: timebase, duration: duration, decoder: FFMpegMediaPassthroughVideoFrameDecoder(videoFormat: videoFormat, rotationAngle: rotationAngle), rotationAngle: rotationAngle, aspect: aspect) + break + } + } else if codecPar.pointee.codec_id == AV_CODEC_ID_HEVC { + if let videoFormat = FFMpegMediaFrameSourceContextHelpers.createFormatDescriptionFromHEVCCodecData(UInt32(kCMVideoCodecType_HEVC), codecPar.pointee.width, codecPar.pointee.height, codecPar.pointee.extradata, codecPar.pointee.extradata_size) { let (fps, timebase) = FFMpegMediaFrameSourceContextHelpers.streamFpsAndTimeBase(stream: avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!, defaultTimeBase: CMTimeMake(1, 1000)) let duration = CMTimeMake(avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.duration, timebase.timescale) diff --git a/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift b/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift index 9adfea9c24..d5ccaf6de5 100644 --- a/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift +++ b/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift @@ -4,12 +4,16 @@ import TelegramUIPrivateModule final class FFMpegMediaFrameSourceContextHelpers { static let registerFFMpegGlobals: Void = { + #if DEBUG + av_log_set_level(AV_LOG_ERROR) + #else av_log_set_level(AV_LOG_QUIET) + #endif av_register_all() return }() - static func createFormatDescriptionFromCodecData(_ formatId: UInt32, _ width: Int32, _ height: Int32, _ extradata: UnsafePointer, _ extradata_size: Int32, _ atom: 
UInt32) -> CMFormatDescription? { + static func createFormatDescriptionFromAVCCodecData(_ formatId: UInt32, _ width: Int32, _ height: Int32, _ extradata: UnsafePointer, _ extradata_size: Int32) -> CMFormatDescription? { let par = NSMutableDictionary() par.setObject(1 as NSNumber, forKey: "HorizontalSpacing" as NSString) par.setObject(1 as NSNumber, forKey: "VerticalSpacing" as NSString) @@ -35,6 +39,33 @@ final class FFMpegMediaFrameSourceContextHelpers { return formatDescription } + + static func createFormatDescriptionFromHEVCCodecData(_ formatId: UInt32, _ width: Int32, _ height: Int32, _ extradata: UnsafePointer, _ extradata_size: Int32) -> CMFormatDescription? { + let par = NSMutableDictionary() + par.setObject(1 as NSNumber, forKey: "HorizontalSpacing" as NSString) + par.setObject(1 as NSNumber, forKey: "VerticalSpacing" as NSString) + + let atoms = NSMutableDictionary() + atoms.setObject(NSData(bytes: extradata, length: Int(extradata_size)), forKey: "hvcC" as NSString) + + let extensions = NSMutableDictionary() + extensions.setObject("left" as NSString, forKey: "CVImageBufferChromaLocationBottomField" as NSString) + extensions.setObject("left" as NSString, forKey: "CVImageBufferChromaLocationTopField" as NSString) + extensions.setObject(0 as NSNumber, forKey: "FullRangeVideo" as NSString) + extensions.setObject(par, forKey: "CVPixelAspectRatio" as NSString) + extensions.setObject(atoms, forKey: "SampleDescriptionExtensionAtoms" as NSString) + extensions.setObject("hevc" as NSString, forKey: "FormatName" as NSString) + extensions.setObject(0 as NSNumber, forKey: "SpatialQuality" as NSString) + extensions.setObject(0 as NSNumber, forKey: "Version" as NSString) + extensions.setObject(0 as NSNumber, forKey: "FullRangeVideo" as NSString) + extensions.setObject(1 as NSNumber, forKey: "CVFieldCount" as NSString) + extensions.setObject(24 as NSNumber, forKey: "Depth" as NSString) + + var formatDescription: CMFormatDescription? 
+ CMVideoFormatDescriptionCreate(nil, CMVideoCodecType(formatId), width, height, extensions, &formatDescription) + + return formatDescription + } static func streamIndices(formatContext: UnsafeMutablePointer, codecType: AVMediaType) -> [Int] { var indices: [Int] = [] diff --git a/TelegramUI/FeedGroupingController.swift b/TelegramUI/FeedGroupingController.swift index 2c5f377781..8690f25b43 100644 --- a/TelegramUI/FeedGroupingController.swift +++ b/TelegramUI/FeedGroupingController.swift @@ -19,7 +19,7 @@ final class FeedGroupingController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/FetchPhotoLibraryImageResource.swift b/TelegramUI/FetchPhotoLibraryImageResource.swift index 74efdae386..e675cc1586 100644 --- a/TelegramUI/FetchPhotoLibraryImageResource.swift +++ b/TelegramUI/FetchPhotoLibraryImageResource.swift @@ -45,12 +45,12 @@ func fetchPhotoLibraryResource(localIdentifier: String) -> Signal Media? { return nil } +private let internalExtensions = Set([ + "txt", + "doc", + "docx", + "xls", + "xlsx", + "ppt", + "pptx", + "php", + "cpp", + "h", + "swift", + "m", + "mm", + "java", +]) + +private let internalMimeTypes = Set([ + "application/pdf", + "application/postscript", + "application/text" +]) + +private var intermalMimePrefixes: [String] = [ + "image/", + "text/", + "application/vnd.ms-", + "video/" +] + +func internalDocumentItemSupportsMimeType(_ type: String, fileName: String?) 
-> Bool { + if let fileName = fileName { + let ext = (fileName as NSString).pathExtension + if internalExtensions.contains(ext.lowercased()) { + return true + } + } + + if internalMimeTypes.contains(type) { + return true + } + for prefix in intermalMimePrefixes { + if type.hasPrefix(prefix) { + return true + } + } + return false +} + func galleryItemForEntry(account: Account, theme: PresentationTheme, strings: PresentationStrings, entry: MessageHistoryEntry, streamVideos: Bool, loopVideos: Bool = false, hideControls: Bool = false, playbackCompleted: @escaping () -> Void = {}) -> GalleryItem? { switch entry { case let .MessageEntry(message, _, location, _): @@ -79,18 +128,24 @@ func galleryItemForEntry(account: Account, theme: PresentationTheme, strings: Pr return ChatImageGalleryItem(account: account, theme: theme, strings: strings, message: message, location: location) } else if let file = media as? TelegramMediaFile { if file.isVideo || file.mimeType.hasPrefix("video/") { - return UniversalVideoGalleryItem(account: account, theme: theme, strings: strings, content: NativeVideoContent(id: .message(message.id, file.fileId), file: file, streamVideo: streamVideos, loopVideo: loopVideos), originData: GalleryItemOriginData(title: message.author?.displayTitle, timestamp: message.timestamp), indexData: location.flatMap { GalleryItemIndexData(position: Int32($0.index), totalCount: Int32($0.count)) }, contentInfo: .message(message), caption: message.text, hideControls: hideControls, playbackCompleted: playbackCompleted) + return UniversalVideoGalleryItem(account: account, theme: theme, strings: strings, content: NativeVideoContent(id: .message(message.id, message.stableId, file.fileId), file: file, streamVideo: streamVideos, loopVideo: loopVideos), originData: GalleryItemOriginData(title: message.author?.displayTitle, timestamp: message.timestamp), indexData: location.flatMap { GalleryItemIndexData(position: Int32($0.index), totalCount: Int32($0.count)) }, contentInfo: 
.message(message), caption: message.text, hideControls: hideControls, playbackCompleted: playbackCompleted) } else { - if file.mimeType.hasPrefix("image/") && file.mimeType != "image/gif" && (file.size == nil || file.size! < 5 * 1024 * 1024) { - return ChatImageGalleryItem(account: account, theme: theme, strings: strings, message: message, location: location) - } else { + if file.mimeType.hasPrefix("image/") && file.mimeType != "image/gif" { + if file.size == nil || file.size! < 5 * 1024 * 1024 { + return ChatImageGalleryItem(account: account, theme: theme, strings: strings, message: message, location: location) + } else { + return ChatDocumentGalleryItem(account: account, theme: theme, strings: strings, message: message, location: location) + } + } else if internalDocumentItemSupportsMimeType(file.mimeType, fileName: file.fileName) { return ChatDocumentGalleryItem(account: account, theme: theme, strings: strings, message: message, location: location) + } else { + return ChatExternalFileGalleryItem(account: account, theme: theme, strings: strings, message: message, location: location) } } } else if let webpage = media as? 
TelegramMediaWebpage, case let .Loaded(webpageContent) = webpage.content { switch websiteType(of: webpageContent) { case .instagram where webpageContent.file != nil && webpageContent.image != nil && webpageContent.file!.isVideo: - return UniversalVideoGalleryItem(account: account, theme: theme, strings: strings, content: NativeVideoContent(id: NativeVideoContentId.message(message.id, webpage.webpageId), file: webpageContent.file!, streamVideo: true, enableSound: true), originData: GalleryItemOriginData(title: message.author?.displayTitle, timestamp: message.timestamp), indexData: location.flatMap { GalleryItemIndexData(position: Int32($0.index), totalCount: Int32($0.count)) }, contentInfo: .message(message), caption: "") + return UniversalVideoGalleryItem(account: account, theme: theme, strings: strings, content: NativeVideoContent(id: NativeVideoContentId.message(message.id, message.stableId, webpage.webpageId), file: webpageContent.file!, streamVideo: true, enableSound: true), originData: GalleryItemOriginData(title: message.author?.displayTitle, timestamp: message.timestamp), indexData: location.flatMap { GalleryItemIndexData(position: Int32($0.index), totalCount: Int32($0.count)) }, contentInfo: .message(message), caption: "") //return UniversalVideoGalleryItem(account: account, theme: theme, strings: strings, content: SystemVideoContent(url: webpageContent.embedUrl!, image: webpageContent.image!, dimensions: webpageContent.embedSize ?? CGSize(width: 640.0, height: 640.0), duration: Int32(webpageContent.duration ?? 
0)), originData: GalleryItemOriginData(title: message.author?.displayTitle, timestamp: message.timestamp), indexData: location.flatMap { GalleryItemIndexData(position: Int32($0.index), totalCount: Int32($0.count)) }, contentInfo: .message(message), caption: "") /*case .twitter where webpageContent.embedUrl != nil && webpageContent.image != nil: return UniversalVideoGalleryItem(account: account, theme: theme, strings: strings, content: SystemVideoContent(url: webpageContent.embedUrl!, image: webpageContent.image!, dimensions: webpageContent.embedSize ?? CGSize(width: 640.0, height: 640.0), duration: Int32(webpageContent.duration ?? 0)), originData: GalleryItemOriginData(title: message.author?.displayTitle, timestamp: message.timestamp), indexData: location.flatMap { GalleryItemIndexData(position: Int32($0.index), totalCount: Int32($0.count)) }, contentInfo: .message(message), caption: "")*/ @@ -196,7 +251,7 @@ class GalleryController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: GalleryController.darkNavigationTheme) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: GalleryController.darkNavigationTheme, strings: NavigationBarStrings(presentationStrings: self.presentationData.strings))) let backItem = UIBarButtonItem(backButtonAppearanceWithTitle: presentationData.strings.Common_Back, target: self, action: #selector(self.donePressed)) self.navigationItem.leftBarButtonItem = backItem @@ -359,12 +414,12 @@ class GalleryController: ViewController { switch style { case .dark: strongSelf.statusBar.statusBarStyle = .White - strongSelf.navigationBar?.updateTheme(GalleryController.darkNavigationTheme) + strongSelf.navigationBar?.updatePresentationData(NavigationBarPresentationData(theme: GalleryController.darkNavigationTheme, strings: NavigationBarStrings(presentationStrings: strongSelf.presentationData.strings))) 
strongSelf.galleryNode.backgroundNode.backgroundColor = UIColor.black strongSelf.galleryNode.isBackgroundExtendedOverNavigationBar = true case .light: strongSelf.statusBar.statusBarStyle = .Black - strongSelf.navigationBar?.updateTheme(GalleryController.lightNavigationTheme) + strongSelf.navigationBar?.updatePresentationData(NavigationBarPresentationData(theme: GalleryController.darkNavigationTheme, strings: NavigationBarStrings(presentationStrings: strongSelf.presentationData.strings))) strongSelf.galleryNode.backgroundNode.backgroundColor = UIColor(rgb: 0xbdbdc2) strongSelf.galleryNode.isBackgroundExtendedOverNavigationBar = false } diff --git a/TelegramUI/GameController.swift b/TelegramUI/GameController.swift index c6684e744c..731a7bbb64 100644 --- a/TelegramUI/GameController.swift +++ b/TelegramUI/GameController.swift @@ -25,7 +25,7 @@ final class GameController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: (account.telegramApplicationContext.currentPresentationData.with { $0 }).theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/HashtagSearchController.swift b/TelegramUI/HashtagSearchController.swift index 83009ca4da..940acf4727 100644 --- a/TelegramUI/HashtagSearchController.swift +++ b/TelegramUI/HashtagSearchController.swift @@ -22,7 +22,7 @@ final class HashtagSearchController: TelegramController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(account: account, navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme), enableMediaAccessoryPanel: true, locationBroadcastPanelSource: .none) + super.init(account: account, 
navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData), enableMediaAccessoryPanel: true, locationBroadcastPanelSource: .none) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style diff --git a/TelegramUI/HorizontalPeerItem.swift b/TelegramUI/HorizontalPeerItem.swift index acad4e7220..26b4994336 100644 --- a/TelegramUI/HorizontalPeerItem.swift +++ b/TelegramUI/HorizontalPeerItem.swift @@ -84,11 +84,11 @@ final class HorizontalPeerItemNode: ListViewItemNode { item.action(item.peer) } } - self.peerNode.longTapAction = { [weak self] in + /*self.peerNode.longTapAction = { [weak self] in if let item = self?.item { item.longTapAction(item.peer) } - } + }*/ } override func didLoad() { diff --git a/TelegramUI/ImageCompression.swift b/TelegramUI/ImageCompression.swift new file mode 100644 index 0000000000..6cd9e59fb7 --- /dev/null +++ b/TelegramUI/ImageCompression.swift @@ -0,0 +1,47 @@ +import Foundation +import AVFoundation + +func compressImageToJPEG(_ image: UIImage, quality: Float) -> Data? { + let data = NSMutableData() + guard let destination = CGImageDestinationCreateWithData(data as CFMutableData, "public.jpeg" as CFString, 1, nil) else { + return nil + } + + let options = NSMutableDictionary() + options.setObject(quality as NSNumber, forKey: kCGImageDestinationLossyCompressionQuality as NSString) + + guard let cgImage = image.cgImage else { + return nil + } + CGImageDestinationAddImage(destination, cgImage, options as CFDictionary) + CGImageDestinationFinalize(destination) + + if data.length == 0 { + return nil + } + + return data as Data +} + +@available(iOSApplicationExtension 11.0, *) +func compressImage(_ image: UIImage, quality: Float) -> Data? 
{ + let data = NSMutableData() + guard let destination = CGImageDestinationCreateWithData(data as CFMutableData, AVFileType.heic as CFString, 1, nil) else { + return nil + } + + let options = NSMutableDictionary() + options.setObject(quality as NSNumber, forKey: kCGImageDestinationLossyCompressionQuality as NSString) + + guard let cgImage = image.cgImage else { + return nil + } + CGImageDestinationAddImage(destination, cgImage, options as CFDictionary) + CGImageDestinationFinalize(destination) + + if data.length == 0 { + return nil + } + + return data as Data +} diff --git a/TelegramUI/InstantPageController.swift b/TelegramUI/InstantPageController.swift index 3b26dc60e8..f7da622e78 100644 --- a/TelegramUI/InstantPageController.swift +++ b/TelegramUI/InstantPageController.swift @@ -32,7 +32,7 @@ final class InstantPageController: ViewController { self.webPage = webPage self.anchor = anchor - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .White diff --git a/TelegramUI/InstantPageControllerNode.swift b/TelegramUI/InstantPageControllerNode.swift index 039f2dfc30..ea12dd5366 100644 --- a/TelegramUI/InstantPageControllerNode.swift +++ b/TelegramUI/InstantPageControllerNode.swift @@ -54,7 +54,7 @@ final class InstantPageControllerNode: ASDisplayNode, UIScrollViewDelegate { self.timeFormat = timeFormat self.strings = strings self.settings = settings - self.theme = settings.flatMap { return instantPageThemeForSettingsAndTime(settings: $0, time: Date()) } + self.theme = settings.flatMap { return instantPageThemeForSettingsAndTime(presentationTheme: presentationTheme, settings: $0, time: Date()) } self.statusBar = statusBar self.getNavigationController = getNavigationController @@ -113,7 +113,7 @@ final class InstantPageControllerNode: ASDisplayNode, UIScrollViewDelegate { var updateLayout = previousSettings == nil self.settings = settings - let theme = instantPageThemeForSettingsAndTime(settings: 
settings, time: Date()) + let theme = instantPageThemeForSettingsAndTime(presentationTheme: self.presentationTheme, settings: settings, time: Date()) self.theme = theme self.strings = strings diff --git a/TelegramUI/InstantPageGalleryController.swift b/TelegramUI/InstantPageGalleryController.swift index aadbcdc0c1..86d29014dc 100644 --- a/TelegramUI/InstantPageGalleryController.swift +++ b/TelegramUI/InstantPageGalleryController.swift @@ -83,7 +83,7 @@ class InstantPageGalleryController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: GalleryController.darkNavigationTheme) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: GalleryController.darkNavigationTheme, strings: NavigationBarStrings(presentationStrings: self.presentationData.strings))) let backItem = UIBarButtonItem(backButtonAppearanceWithTitle: presentationData.strings.Common_Back, target: self, action: #selector(self.donePressed)) self.navigationItem.leftBarButtonItem = backItem diff --git a/TelegramUI/InstantPageTheme.swift b/TelegramUI/InstantPageTheme.swift index 4215bd7c7c..957dec02b5 100644 --- a/TelegramUI/InstantPageTheme.swift +++ b/TelegramUI/InstantPageTheme.swift @@ -184,13 +184,28 @@ private func fontSizeMultiplierForVariant(_ variant: InstantPagePresentationFont } } -func instantPageThemeForSettingsAndTime(settings: InstantPagePresentationSettings, time: Date) -> InstantPageTheme { +func instantPageThemeForSettingsAndTime(presentationTheme: PresentationTheme, settings: InstantPagePresentationSettings, time: Date) -> InstantPageTheme { if settings.autoNightMode { switch settings.themeType { case .light, .sepia, .gray: + var useDarkTheme = false + switch presentationTheme.name { + case let .builtin(name): + switch name { + case .nightAccent, .nightGrayscale: + useDarkTheme = true + default: + break + } + default: + break + } let calendar = Calendar.current let hour = 
calendar.component(.hour, from: time) if hour <= 8 || hour >= 22 { + useDarkTheme = true + } + if useDarkTheme { return darkTheme.withUpdatedFontStyles(sizeMultiplier: fontSizeMultiplierForVariant(settings.fontSize), forceSerif: settings.forceSerif) } case .dark: diff --git a/TelegramUI/InstantVideoNode.swift b/TelegramUI/InstantVideoNode.swift index d6b2167a75..68502e3d90 100644 --- a/TelegramUI/InstantVideoNode.swift +++ b/TelegramUI/InstantVideoNode.swift @@ -12,7 +12,7 @@ private final class SharedInstantVideoContext: SharedVideoContext { private let playbackCompletedListeners = Bag<() -> Void>() init(audioSessionManager: ManagedAudioSession, postbox: Postbox, resource: MediaResource) { - self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: resource, streamable: false, video: true, preferSoftwareDecoding: false, enableSound: false) + self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: resource, streamable: false, video: true, preferSoftwareDecoding: false, enableSound: false, fetchAutomatically: true) var actionAtEndImpl: (() -> Void)? 
self.player.actionAtEnd = .loopDisablingSound({ actionAtEndImpl?() diff --git a/TelegramUI/InviteContactsController.swift b/TelegramUI/InviteContactsController.swift index a106e55a06..9efe24d171 100644 --- a/TelegramUI/InviteContactsController.swift +++ b/TelegramUI/InviteContactsController.swift @@ -28,7 +28,7 @@ public class InviteContactsController: ViewController, MFMessageComposeViewContr self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -68,7 +68,7 @@ public class InviteContactsController: ViewController, MFMessageComposeViewContr private func updateThemeAndStrings() { self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) self.title = self.presentationData.strings.Contacts_InviteFriends self.navigationItem.backBarButtonItem = UIBarButtonItem(title: self.presentationData.strings.Common_Back, style: .plain, target: nil, action: nil) } @@ -89,31 +89,43 @@ public class InviteContactsController: ViewController, MFMessageComposeViewContr self.contactsNode.requestShareTelegram = { [weak self] in if let strongSelf = self { - let shareController = ShareController(account: strongSelf.account, subject: .url("https://telegram.org/dl"), externalShare: true, immediateExternalShare: true) + let url = strongSelf.presentationData.strings.InviteText_URL + var body = strongSelf.presentationData.strings.InviteText_SingleContact(url).0 + + let shareController = 
ShareController(account: strongSelf.account, subject: .text(body), externalShare: true, immediateExternalShare: true) strongSelf.present(shareController, in: .window(.root)) } } self.contactsNode.requestShare = { [weak self] numbers in - if let strongSelf = self, MFMessageComposeViewController.canSendText() { - let composer = MFMessageComposeViewController() - composer.messageComposeDelegate = strongSelf - let recipients: [String] = Array(numbers.map { - return $0.0.phoneNumbers.map { $0.number.plain } - }.joined()) - composer.recipients = Array(Set(recipients)) - let url = strongSelf.presentationData.strings.InviteText_URL - var body = strongSelf.presentationData.strings.InviteText_SingleContact(url).0 - if numbers.count == 1, numbers[0].1 > 0 { - body = strongSelf.presentationData.strings.InviteText_ContactsCount(numbers[0].1) - body = body.replacingOccurrences(of: "(null)", with: url) - } - composer.body = body - strongSelf.composer = composer - if let window = strongSelf.view.window { - window.rootViewController?.present(composer, animated: true) + let recipients: [String] = Array(numbers.map { + return $0.0.phoneNumbers.map { $0.number.plain } + }.joined()) + + let f: () -> Void = { + if let strongSelf = self, MFMessageComposeViewController.canSendText() { + let composer = MFMessageComposeViewController() + composer.messageComposeDelegate = strongSelf + composer.recipients = Array(Set(recipients)) + let url = strongSelf.presentationData.strings.InviteText_URL + var body = strongSelf.presentationData.strings.InviteText_SingleContact(url).0 + if numbers.count == 1, numbers[0].1 > 0 { + body = strongSelf.presentationData.strings.InviteText_ContactsCount(numbers[0].1) + body = body.replacingOccurrences(of: "(null)", with: url) + } + composer.body = body + strongSelf.composer = composer + if let window = strongSelf.view.window { + window.rootViewController?.present(composer, animated: true) + } } } + + if recipients.count < 100 { + f() + } else if let strongSelf = 
self { + strongSelf.present(standardTextAlertController(theme: AlertControllerTheme(presentationTheme: strongSelf.presentationData.theme), title: nil, text: strongSelf.presentationData.strings.Invite_LargeRecipientsCountWarning, actions: [TextAlertAction(type: .genericAction, title: strongSelf.presentationData.strings.Common_Cancel, action: {}), TextAlertAction(type: .defaultAction, title: strongSelf.presentationData.strings.Common_OK, action: f)]), in: .window(.root)) + } } self.displayNodeDidLoad() diff --git a/TelegramUI/ItemListAvatarAndNameItem.swift b/TelegramUI/ItemListAvatarAndNameItem.swift index 82eb4df390..b6532cd760 100644 --- a/TelegramUI/ItemListAvatarAndNameItem.swift +++ b/TelegramUI/ItemListAvatarAndNameItem.swift @@ -46,6 +46,23 @@ enum ItemListAvatarAndNameInfoItemName: Equatable { } } + func composedDisplayTitle(strings: PresentationStrings) -> String { + switch self { + case let .personName(firstName, lastName): + if !firstName.isEmpty && !lastName.isEmpty { + return firstName + " " + lastName + } else if !firstName.isEmpty { + return firstName + } else if !lastName.isEmpty { + return lastName + } else { + return strings.Peer_DeletedUser + } + case let .title(title, _): + return title + } + } + var isEmpty: Bool { switch self { case let .personName(firstName, _): @@ -355,7 +372,7 @@ class ItemListAvatarAndNameInfoItemNode: ListViewItemNode, ItemListItemNode, Ite additionalTitleInset += 3.0 + verificationIconImage.size.width } - let (nameNodeLayout, nameNodeApply) = layoutNameNode(TextNodeLayoutArguments(attributedString: NSAttributedString(string: displayTitle.composedTitle, font: nameFont, textColor: item.theme.list.itemPrimaryTextColor), backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: baseWidth - 20 - 94.0 - (item.call != nil ? 
36.0 : 0.0) - additionalTitleInset, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets())) + let (nameNodeLayout, nameNodeApply) = layoutNameNode(TextNodeLayoutArguments(attributedString: NSAttributedString(string: displayTitle.composedDisplayTitle(strings: item.strings), font: nameFont, textColor: item.theme.list.itemPrimaryTextColor), backgroundColor: nil, maximumNumberOfLines: 1, truncationType: .end, constrainedSize: CGSize(width: baseWidth - 20 - 94.0 - (item.call != nil ? 36.0 : 0.0) - additionalTitleInset, height: CGFloat.greatestFiniteMagnitude), alignment: .natural, cutout: nil, insets: UIEdgeInsets())) var statusText: String = "" let statusColor: UIColor @@ -373,14 +390,21 @@ class ItemListAvatarAndNameInfoItemNode: ListViewItemNode, ItemListItemNode, Ite } statusColor = item.theme.list.itemSecondaryTextColor case .generic: - let presence = (item.presence as? TelegramUserPresence) ?? TelegramUserPresence(status: .none) - let timestamp = CFAbsoluteTimeGetCurrent() + NSTimeIntervalSince1970 - let (string, activity) = stringAndActivityForUserPresence(strings: item.strings, timeFormat: .regular, presence: presence, relativeTo: Int32(timestamp)) - statusText = string - if activity { - statusColor = item.theme.list.itemAccentColor - } else { + if let _ = peer.botInfo { + statusText = item.strings.Bot_GenericBotStatus statusColor = item.theme.list.itemSecondaryTextColor + } else if let presence = item.presence as? 
TelegramUserPresence { + let timestamp = CFAbsoluteTimeGetCurrent() + NSTimeIntervalSince1970 + let (string, activity) = stringAndActivityForUserPresence(strings: item.strings, timeFormat: .regular, presence: presence, relativeTo: Int32(timestamp)) + statusText = string + if activity { + statusColor = item.theme.list.itemAccentColor + } else { + statusColor = item.theme.list.itemSecondaryTextColor + } + } else { + statusText = "" + statusColor = item.theme.list.itemPrimaryTextColor } } } else if let channel = item.peer as? TelegramChannel { diff --git a/TelegramUI/ItemListController.swift b/TelegramUI/ItemListController.swift index f2e9af9d7e..eb9f7f90bc 100644 --- a/TelegramUI/ItemListController.swift +++ b/TelegramUI/ItemListController.swift @@ -137,6 +137,7 @@ final class ItemListController: ViewController { private var segmentedTitleView: ItemListControllerSegmentedTitleView? private var theme: PresentationTheme + private var strings: PresentationStrings private var didPlayPresentationAnimation = false @@ -176,9 +177,11 @@ final class ItemListController: ViewController { init(account: Account, state: Signal<(ItemListControllerState, (ItemListNodeState, Entry.ItemGenerationArguments)), NoError>, tabBarItem: Signal? 
= nil) { self.state = state - self.theme = (account.telegramApplicationContext.currentPresentationData.with { $0 }).theme + let presentationData = (account.telegramApplicationContext.currentPresentationData.with { $0 }) + self.theme = presentationData.theme + self.strings = presentationData.strings - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: presentationData)) self.statusBar.statusBarStyle = (account.telegramApplicationContext.currentPresentationData.with { $0 }).theme.rootController.statusBar.style.style @@ -337,7 +340,7 @@ final class ItemListController: ViewController { if strongSelf.theme !== controllerState.theme { strongSelf.theme = controllerState.theme - strongSelf.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: strongSelf.theme)) + strongSelf.navigationBar?.updatePresentationData(NavigationBarPresentationData(theme: NavigationBarTheme(rootControllerTheme: strongSelf.theme), strings: NavigationBarStrings(presentationStrings: strongSelf.strings))) strongSelf.statusBar.statusBarStyle = strongSelf.theme.rootController.statusBar.style.style strongSelf.segmentedTitleView?.color = controllerState.theme.rootController.navigationBar.accentTextColor diff --git a/TelegramUI/ItemListRevealOptionsNode.swift b/TelegramUI/ItemListRevealOptionsNode.swift index ebe3afe5a5..4a554ad520 100644 --- a/TelegramUI/ItemListRevealOptionsNode.swift +++ b/TelegramUI/ItemListRevealOptionsNode.swift @@ -1,7 +1,7 @@ import Foundation import AsyncDisplayKit import Display -import Lottie +//import Lottie struct ItemListRevealOption: Equatable { let key: Int32 @@ -37,7 +37,7 @@ final class ItemListRevealOptionNode: ASDisplayNode { private let titleNode: ASTextNode private let iconNode: ASImageNode? - private var animView: LOTView? + //private var animView: LOTView? 
init(title: String, icon: UIImage?, color: UIColor, textColor: UIColor) { self.titleNode = ASTextNode() diff --git a/TelegramUI/JoinLinkPreviewController.swift b/TelegramUI/JoinLinkPreviewController.swift index d25e425aa3..3704e44225 100644 --- a/TelegramUI/JoinLinkPreviewController.swift +++ b/TelegramUI/JoinLinkPreviewController.swift @@ -26,7 +26,7 @@ public final class JoinLinkPreviewController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) } required public init(coder aDecoder: NSCoder) { diff --git a/TelegramUI/JoinLinkPreviewControllerNode.swift b/TelegramUI/JoinLinkPreviewControllerNode.swift index 671f75571e..abc7c29fc3 100644 --- a/TelegramUI/JoinLinkPreviewControllerNode.swift +++ b/TelegramUI/JoinLinkPreviewControllerNode.swift @@ -300,7 +300,7 @@ final class JoinLinkPreviewControllerNode: ViewControllerTracingNode, UIScrollVi self.animateContentNodeOffsetFromBackgroundOffset = nil let offset = backgroundFrame.minY - animateContentNodeOffsetFromBackgroundOffset if let contentNode = self.contentNode { - transition.animatePositionAdditive(node: contentNode, offset: -offset) + transition.animatePositionAdditive(node: contentNode, offset: CGPoint(x: 0.0, y: -offset)) } if let previousContentNode = self.previousContentNode { transition.updatePosition(node: previousContentNode, position: previousContentNode.position.offsetBy(dx: 0.0, dy: offset)) diff --git a/TelegramUI/LanguageSelectionController.swift b/TelegramUI/LanguageSelectionController.swift index bd1a7fceed..8ec6bb15e0 100644 --- a/TelegramUI/LanguageSelectionController.swift +++ b/TelegramUI/LanguageSelectionController.swift @@ -361,7 +361,7 @@ final class LanguageSelectionController: ViewController { self.innerController = InnerLanguageSelectionController(account: account) self.innerNavigationController = UINavigationController(rootViewController: 
self.innerController) - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style self.innerNavigationController.navigationBar.barTintColor = self.presentationData.theme.rootController.navigationBar.backgroundColor diff --git a/TelegramUI/LegacyAttachmentMenu.swift b/TelegramUI/LegacyAttachmentMenu.swift index b03efd9a8c..6f9bcf04d5 100644 --- a/TelegramUI/LegacyAttachmentMenu.swift +++ b/TelegramUI/LegacyAttachmentMenu.swift @@ -14,7 +14,6 @@ func legacyAttachmentMenu(account: Account, peer: Peer, saveEditedPhotos: Bool, var itemViews: [Any] = [] let carouselItem = TGAttachmentCarouselItemView(context: parentController.context, camera: PGCamera.cameraAvailable(), selfPortrait: false, forProfilePhoto: false, assetType: TGMediaAssetAnyType, saveEditedPhotos: saveEditedPhotos, allowGrouping: allowGrouping)! - //carouselItem.defaultStatusBarStyle = theme.rootController.statusBar.style.style.systemStyle carouselItem.suggestionContext = legacySuggestionContext(account: account, peerId: peer.id) carouselItem.recipientName = peer.displayTitle carouselItem.cameraPressed = { [weak controller] cameraView in @@ -22,7 +21,7 @@ func legacyAttachmentMenu(account: Account, peer: Peer, saveEditedPhotos: Bool, openCamera(cameraView, controller) } } - if peer is TelegramUser || peer is TelegramSecretChat { + if (peer is TelegramUser || peer is TelegramSecretChat) && peer.id != account.peerId { carouselItem.hasTimer = true } carouselItem.sendPressed = { [weak controller, weak carouselItem] currentItem, asFiles in @@ -95,7 +94,7 @@ func legacyPasteMenu(account: Account, peer: Peer, saveEditedPhotos: Bool, allow let baseController = TGViewController(context: legacyController.context)! 
legacyController.bind(controller: baseController) var hasTimer = false - if peer is TelegramUser || peer is TelegramSecretChat { + if (peer is TelegramUser || peer is TelegramSecretChat) && peer.id != account.peerId { hasTimer = true } let recipientName = peer.displayTitle diff --git a/TelegramUI/LegacyCamera.swift b/TelegramUI/LegacyCamera.swift index e18bb50863..1c7ba45ec9 100644 --- a/TelegramUI/LegacyCamera.swift +++ b/TelegramUI/LegacyCamera.swift @@ -26,7 +26,7 @@ func presentedLegacyCamera(account: Account, peer: Peer, cameraView: TGAttachmen controller.inhibitDocumentCaptions = false controller.suggestionContext = legacySuggestionContext(account: account, peerId: peer.id) controller.recipientName = peer.displayTitle - if peer is TelegramUser || peer is TelegramSecretChat { + if (peer is TelegramUser || peer is TelegramSecretChat) && peer.id != account.peerId { controller.hasTimer = true } diff --git a/TelegramUI/LegacyController.swift b/TelegramUI/LegacyController.swift index 81cb38dbcf..686d061fdd 100644 --- a/TelegramUI/LegacyController.swift +++ b/TelegramUI/LegacyController.swift @@ -288,7 +288,7 @@ public class LegacyController: ViewController { self.presentation = presentation self.validLayout = initialLayout - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) if let theme = theme { self.statusBar.statusBarStyle = theme.rootController.statusBar.style.style diff --git a/TelegramUI/LegacyMediaPickers.swift b/TelegramUI/LegacyMediaPickers.swift index b221b49aed..787071c73c 100644 --- a/TelegramUI/LegacyMediaPickers.swift +++ b/TelegramUI/LegacyMediaPickers.swift @@ -17,7 +17,7 @@ func configureLegacyAssetPicker(_ controller: TGMediaAssetsController, account: controller.captionsEnabled = captionsEnabled controller.inhibitDocumentCaptions = false controller.suggestionContext = legacySuggestionContext(account: account, peerId: peer.id) - if peer is TelegramUser || peer is TelegramSecretChat { + if (peer is TelegramUser || 
peer is TelegramSecretChat) && peer.id != account.peerId { controller.hasTimer = true } controller.dismissalBlock = { @@ -165,11 +165,26 @@ func legacyAssetPickerEnqueueMessages(account: Account, peerId: PeerId, signals: arc4random_buf(&randomId, 8) let tempFilePath = NSTemporaryDirectory() + "\(randomId).jpeg" let scaledSize = image.size.aspectFitted(CGSize(width: 1280.0, height: 1280.0)) - if let scaledImage = generateImage(scaledSize, contextGenerator: { size, context in - context.draw(image.cgImage!, in: CGRect(origin: CGPoint(), size: size)) - }, opaque: true) { - if let scaledImageData = UIImageJPEGRepresentation(scaledImage, 0.52) { + if let scaledImage = TGScaleImageToPixelSize(image, scaledSize) { + if let scaledImageData = compressImageToJPEG(scaledImage, quality: 0.42) { let _ = try? scaledImageData.write(to: URL(fileURLWithPath: tempFilePath)) + #if DEBUG + if #available(iOSApplicationExtension 11.0, *) { + if false, let heicData = compressImage(scaledImage, quality: 0.65) { + var randomId: Int64 = 0 + arc4random_buf(&randomId, 8) + let _ = try? heicData.write(to: URL(fileURLWithPath: tempFilePath + ".heic")) + let resource = LocalFileReferenceMediaResource(localFilePath: tempFilePath + ".heic", randomId: randomId) + let media = TelegramMediaFile(fileId: MediaId(namespace: Namespaces.Media.LocalFile, id: randomId), resource: resource, previewRepresentations: [], mimeType: "image/heic", size: nil, attributes: [.FileName(fileName: "image.heic")]) + var attributes: [MessageAttribute] = [] + if let timer = item.timer, timer > 0 && timer <= 60 { + attributes.append(AutoremoveTimeoutMessageAttribute(timeout: Int32(timer), countdownBeginTime: nil)) + } + messages.append(.message(text: caption ?? 
"", attributes: attributes, media: media, replyToMessageId: nil, localGroupingKey: item.groupedId)) + } + } + #endif + let resource = LocalFileReferenceMediaResource(localFilePath: tempFilePath, randomId: randomId) let media = TelegramMediaImage(imageId: MediaId(namespace: Namespaces.Media.LocalImage, id: randomId), representations: [TelegramMediaImageRepresentation(dimensions: scaledSize, resource: resource)], reference: nil) var attributes: [MessageAttribute] = [] diff --git a/TelegramUI/LegqacyICloudFileController.swift b/TelegramUI/LegqacyICloudFileController.swift index d4577afcae..48d99e09c1 100644 --- a/TelegramUI/LegqacyICloudFileController.swift +++ b/TelegramUI/LegqacyICloudFileController.swift @@ -54,7 +54,11 @@ func legacyICloudFileController(theme: PresentationTheme, completion: @escaping legacyController.presentationCompleted = { [weak legacyController] in if let legacyController = legacyController { - legacyController.view.window?.rootViewController?.present(controller, animated: true) + if let window = legacyController.view.window { + controller.popoverPresentationController?.sourceView = window + controller.popoverPresentationController?.sourceRect = CGRect(origin: CGPoint(x: window.bounds.width / 2.0, y: window.bounds.size.height - 1.0), size: CGSize(width: 1.0, height: 1.0)) + window.rootViewController?.present(controller, animated: true) + } } } diff --git a/TelegramUI/LiveLocationManager.swift b/TelegramUI/LiveLocationManager.swift index 70d009196d..b563adfc70 100644 --- a/TelegramUI/LiveLocationManager.swift +++ b/TelegramUI/LiveLocationManager.swift @@ -17,7 +17,6 @@ public final class LiveLocationManager { private var requiredLocationTypeDisposable: Disposable? 
private let hasActiveMessagesToBroadcast = ValuePromise(false, ignoreRepeated: true) - public var isPolling: Signal { return self.pollingOnce.get() } diff --git a/TelegramUI/LiveLocationSummaryManager.swift b/TelegramUI/LiveLocationSummaryManager.swift index 98439f05f3..d88f766bb9 100644 --- a/TelegramUI/LiveLocationSummaryManager.swift +++ b/TelegramUI/LiveLocationSummaryManager.swift @@ -6,21 +6,21 @@ import SwiftSignalKit private final class LiveLocationSummaryContext { private let queue: Queue private let postbox: Postbox - private var subscribers = Bag<([Peer]) -> Void>() + private var subscribers = Bag<([MessageId: Message]) -> Void>() - var peerIds = Set() { + var messageIds = Set() { didSet { assert(self.queue.isCurrent()) - if self.peerIds != oldValue { - if self.peerIds.isEmpty { + if self.messageIds != oldValue { + if self.messageIds.isEmpty { self.disposable.set(nil) - self.peers = [] + self.messages = [:] } else { - self.disposable.set((self.postbox.multiplePeersView(Array(self.peerIds)) |> deliverOn(self.queue)).start(next: { [weak self] view in + let key = PostboxViewKey.messages(self.messageIds) + self.disposable.set((self.postbox.combinedView(keys: [key]) |> deliverOn(self.queue)).start(next: { [weak self] view in if let strongSelf = self { - let peers: [Peer] = Array(view.peers.values) - strongSelf.peers = peers + strongSelf.messages = (view.views[key] as? MessagesView)?.messages ?? 
[:] } })) } @@ -28,12 +28,12 @@ private final class LiveLocationSummaryContext { } } - private var peers: [Peer] = [] { + private var messages: [MessageId: Message] = [:] { didSet { assert(self.queue.isCurrent()) for f in self.subscribers.copyItems() { - f(self.peers) + f(self.messages) } } } @@ -49,7 +49,7 @@ private final class LiveLocationSummaryContext { self.disposable.dispose() } - func subscribe() -> Signal<[Peer], NoError> { + func subscribe() -> Signal<[MessageId: Message], NoError> { let queue = self.queue return Signal { [weak self] subscriber in let disposable = MetaDisposable() @@ -59,7 +59,7 @@ private final class LiveLocationSummaryContext { subscriber.putNext(next) }) - subscriber.putNext(strongSelf.peers) + subscriber.putNext(strongSelf.messages) disposable.set(ActionDisposable { [weak self] in queue.async { @@ -210,10 +210,10 @@ final class LiveLocationSummaryManager { context.isActive = peerIds.contains(peerId) } - self.globalContext.peerIds = peerIds + self.globalContext.messageIds = messageIds } - func broadcastingToPeers() -> Signal<[Peer], NoError> { + func broadcastingToMessages() -> Signal<[MessageId: Message], NoError> { return self.globalContext.subscribe() } diff --git a/TelegramUI/LocationBroadcastActionSheetItem.swift b/TelegramUI/LocationBroadcastActionSheetItem.swift new file mode 100644 index 0000000000..931d2dbcb9 --- /dev/null +++ b/TelegramUI/LocationBroadcastActionSheetItem.swift @@ -0,0 +1,115 @@ +import Foundation +import AsyncDisplayKit +import Display + +public class LocationBroadcastActionSheetItem: ActionSheetItem { + public let title: String + public let beginTimestamp: Double + public let timeout: Double + public let strings: PresentationStrings + public let action: () -> Void + + public init(title: String, beginTimestamp: Double, timeout: Double, strings: PresentationStrings, action: @escaping () -> Void) { + self.title = title + self.beginTimestamp = beginTimestamp + self.timeout = timeout + self.strings = strings + 
self.action = action + } + + public func node(theme: ActionSheetControllerTheme) -> ActionSheetItemNode { + let node = LocationBroadcastActionSheetItemNode(theme: theme) + node.setItem(self) + return node + } + + public func updateNode(_ node: ActionSheetItemNode) { + guard let node = node as? LocationBroadcastActionSheetItemNode else { + assertionFailure() + return + } + + node.setItem(self) + } +} + +public class LocationBroadcastActionSheetItemNode: ActionSheetItemNode { + private let theme: ActionSheetControllerTheme + + public static let defaultFont: UIFont = Font.regular(20.0) + + private var item: LocationBroadcastActionSheetItem? + + private let button: HighlightTrackingButton + private let label: ImmediateTextNode + private let timerNode: ChatMessageLiveLocationTimerNode + + override public init(theme: ActionSheetControllerTheme) { + self.theme = theme + + self.button = HighlightTrackingButton() + + self.label = ImmediateTextNode() + self.label.isLayerBacked = true + self.label.displaysAsynchronously = false + self.label.maximumNumberOfLines = 1 + + self.timerNode = ChatMessageLiveLocationTimerNode() + + super.init(theme: theme) + + self.view.addSubview(self.button) + self.addSubnode(self.label) + self.addSubnode(self.timerNode) + + self.button.highligthedChanged = { [weak self] highlighted in + if let strongSelf = self { + if highlighted { + strongSelf.backgroundNode.backgroundColor = strongSelf.theme.itemHighlightedBackgroundColor + } else { + UIView.animate(withDuration: 0.3, animations: { + strongSelf.backgroundNode.backgroundColor = strongSelf.theme.itemBackgroundColor + }) + } + } + } + + self.button.addTarget(self, action: #selector(self.buttonPressed), for: .touchUpInside) + } + + func setItem(_ item: LocationBroadcastActionSheetItem) { + self.item = item + + let textColor: UIColor = self.theme.standardActionTextColor + self.label.attributedText = NSAttributedString(string: item.title, font: ActionSheetButtonNode.defaultFont, textColor: textColor) 
+ + self.timerNode.update(backgroundColor: self.theme.controlAccentColor.withAlphaComponent(0.4), foregroundColor: self.theme.controlAccentColor, textColor: self.theme.controlAccentColor, beginTimestamp: item.beginTimestamp, timeout: item.timeout, strings: item.strings) + + self.setNeedsLayout() + } + + public override func calculateSizeThatFits(_ constrainedSize: CGSize) -> CGSize { + return CGSize(width: constrainedSize.width, height: 57.0) + } + + public override func layout() { + super.layout() + + let size = self.bounds.size + + self.button.frame = CGRect(origin: CGPoint(), size: size) + + let labelSize = self.label.updateLayout(CGSize(width: max(1.0, size.width - 10.0), height: size.height)) + self.label.frame = CGRect(origin: CGPoint(x: 16.0, y: floorToScreenPixels((size.height - labelSize.height) / 2.0)), size: labelSize) + + let timerSize = CGSize(width: 28.0, height: 28.0) + self.timerNode.frame = CGRect(origin: CGPoint(x: size.width - 16.0 - timerSize.width, y: floorToScreenPixels((size.height - timerSize.height) / 2.0)), size: timerSize) + } + + @objc func buttonPressed() { + if let item = self.item { + item.action() + } + } +} + diff --git a/TelegramUI/ManagedAudioPlaylistPlayer.swift b/TelegramUI/ManagedAudioPlaylistPlayer.swift index e35e562b57..ab19e8b4b0 100644 --- a/TelegramUI/ManagedAudioPlaylistPlayer.swift +++ b/TelegramUI/ManagedAudioPlaylistPlayer.swift @@ -314,7 +314,7 @@ final class ManagedAudioPlaylistPlayer { itemPlayer = .sharedVideo(videoNode) } } else { - let player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: resource, streamable: item.streamable, video: false, preferSoftwareDecoding: false, enableSound: true) + let player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: resource, streamable: item.streamable, video: false, preferSoftwareDecoding: false, enableSound: true, fetchAutomatically: true) itemPlayer = .player(player) } return .single((item, 
AudioPlaylistItemState(item: item, player: itemPlayer))) diff --git a/TelegramUI/MapInputController.swift b/TelegramUI/MapInputController.swift index 8e24bd5d02..8f7b04a57c 100644 --- a/TelegramUI/MapInputController.swift +++ b/TelegramUI/MapInputController.swift @@ -18,7 +18,7 @@ final class MapInputController: ViewController { } init() { - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self._ready.set(.single(true)) diff --git a/TelegramUI/MediaManager.swift b/TelegramUI/MediaManager.swift index ac725b61e4..74822566bf 100644 --- a/TelegramUI/MediaManager.swift +++ b/TelegramUI/MediaManager.swift @@ -488,7 +488,7 @@ public final class MediaManager: NSObject { if let currentActiveContext = self.managedVideoContexts[wrappedId] { activeContext = currentActiveContext } else { - let mediaPlayer = MediaPlayer(audioSessionManager: self.audioSession, postbox: postbox, resource: resource, streamable: false, video: true, preferSoftwareDecoding: preferSoftwareDecoding, enableSound: false) + let mediaPlayer = MediaPlayer(audioSessionManager: self.audioSession, postbox: postbox, resource: resource, streamable: false, video: true, preferSoftwareDecoding: preferSoftwareDecoding, enableSound: false, fetchAutomatically: true) mediaPlayer.actionAtEnd = .loop(nil) let playerNode = MediaPlayerNode(backgroundThread: backgroundThread) mediaPlayer.attachPlayerNode(playerNode) diff --git a/TelegramUI/MediaNavigationAccessoryHeaderNode.swift b/TelegramUI/MediaNavigationAccessoryHeaderNode.swift index ed6cf7a29f..3d04485d3d 100644 --- a/TelegramUI/MediaNavigationAccessoryHeaderNode.swift +++ b/TelegramUI/MediaNavigationAccessoryHeaderNode.swift @@ -2,6 +2,7 @@ import Foundation import AsyncDisplayKit import Display import SwiftSignalKit +import TelegramCore private let titleFont = Font.regular(12.0) private let subtitleFont = Font.regular(10.0) @@ -166,17 +167,31 @@ final class MediaNavigationAccessoryHeaderNode: ASDisplayNode { case let .voice(author, 
peer): let titleText: String = author?.displayTitle ?? "" let subtitleText: String - if author?.id == peer?.id { - subtitleText = self.strings.MusicPlayer_VoiceNote + if let peer = peer { + if peer is TelegramGroup || peer is TelegramChannel { + subtitleText = peer.displayTitle + } else { + subtitleText = self.strings.MusicPlayer_VoiceNote + } } else { - subtitleText = peer?.displayTitle ?? "" + subtitleText = self.strings.MusicPlayer_VoiceNote } titleString = NSAttributedString(string: titleText, font: titleFont, textColor: self.theme.rootController.navigationBar.primaryTextColor) subtitleString = NSAttributedString(string: subtitleText, font: subtitleFont, textColor: self.theme.rootController.navigationBar.secondaryTextColor) case let .instantVideo(author, peer): let titleText: String = author?.displayTitle ?? "" - let subtitleText: String = peer?.displayTitle ?? "" + let subtitleText: String + + if let peer = peer { + if peer is TelegramGroup || peer is TelegramChannel { + subtitleText = peer.displayTitle + } else { + subtitleText = self.strings.MusicPlayer_VoiceNote + } + } else { + subtitleText = self.strings.MusicPlayer_VoiceNote + } titleString = NSAttributedString(string: titleText, font: titleFont, textColor: self.theme.rootController.navigationBar.primaryTextColor) subtitleString = NSAttributedString(string: subtitleText, font: subtitleFont, textColor: self.theme.rootController.navigationBar.secondaryTextColor) diff --git a/TelegramUI/MediaPlayer.swift b/TelegramUI/MediaPlayer.swift index a57836b5d4..46ea8acc50 100644 --- a/TelegramUI/MediaPlayer.swift +++ b/TelegramUI/MediaPlayer.swift @@ -63,6 +63,7 @@ private final class MediaPlayerContext { private let video: Bool private let preferSoftwareDecoding: Bool private var enableSound: Bool + private let fetchAutomatically: Bool private var playAndRecord: Bool private var keepAudioSessionWhilePaused: Bool @@ -82,7 +83,7 @@ private final class MediaPlayerContext { private var stoppedAtEnd = false - 
init(queue: Queue, audioSessionManager: ManagedAudioSession, playerStatus: ValuePromise, postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool, enableSound: Bool, playAndRecord: Bool, keepAudioSessionWhilePaused: Bool) { + init(queue: Queue, audioSessionManager: ManagedAudioSession, playerStatus: ValuePromise, postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool, enableSound: Bool, fetchAutomatically: Bool, playAndRecord: Bool, keepAudioSessionWhilePaused: Bool) { assert(queue.isCurrent()) self.queue = queue @@ -94,6 +95,7 @@ private final class MediaPlayerContext { self.video = video self.preferSoftwareDecoding = preferSoftwareDecoding self.enableSound = enableSound + self.fetchAutomatically = fetchAutomatically self.playAndRecord = playAndRecord self.keepAudioSessionWhilePaused = keepAudioSessionWhilePaused @@ -235,7 +237,7 @@ private final class MediaPlayerContext { self.playerStatus.set(status) } - let frameSource = FFMpegMediaFrameSource(queue: self.queue, postbox: self.postbox, resource: self.resource, streamable: self.streamable, video: self.video, preferSoftwareDecoding: self.preferSoftwareDecoding) + let frameSource = FFMpegMediaFrameSource(queue: self.queue, postbox: self.postbox, resource: self.resource, streamable: self.streamable, video: self.video, preferSoftwareDecoding: self.preferSoftwareDecoding, fetchAutomatically: self.fetchAutomatically) let disposable = MetaDisposable() self.state = .seeking(frameSource: frameSource, timestamp: timestamp, disposable: disposable, action: action, enableSound: self.enableSound) @@ -815,9 +817,9 @@ final class MediaPlayer { } } - init(audioSessionManager: ManagedAudioSession, postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool = false, enableSound: Bool, playAndRecord: Bool = false, 
keepAudioSessionWhilePaused: Bool = true) { + init(audioSessionManager: ManagedAudioSession, postbox: Postbox, resource: MediaResource, streamable: Bool, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool = false, enableSound: Bool, fetchAutomatically: Bool, playAndRecord: Bool = false, keepAudioSessionWhilePaused: Bool = true) { self.queue.async { - let context = MediaPlayerContext(queue: self.queue, audioSessionManager: audioSessionManager, playerStatus: self.statusValue, postbox: postbox, resource: resource, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding, playAutomatically: playAutomatically, enableSound: enableSound, playAndRecord: playAndRecord, keepAudioSessionWhilePaused: keepAudioSessionWhilePaused) + let context = MediaPlayerContext(queue: self.queue, audioSessionManager: audioSessionManager, playerStatus: self.statusValue, postbox: postbox, resource: resource, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding, playAutomatically: playAutomatically, enableSound: enableSound, fetchAutomatically: fetchAutomatically, playAndRecord: playAndRecord, keepAudioSessionWhilePaused: keepAudioSessionWhilePaused) self.contextRef = Unmanaged.passRetained(context) } } diff --git a/TelegramUI/MessageContentKind.swift b/TelegramUI/MessageContentKind.swift index 102c545d0f..0c8f2d2910 100644 --- a/TelegramUI/MessageContentKind.swift +++ b/TelegramUI/MessageContentKind.swift @@ -153,42 +153,42 @@ func messageContentKind(_ message: Message, strings: PresentationStrings, accoun return .text(message.text) } -func descriptionStringForMessage(_ message: Message, strings: PresentationStrings, accountPeerId: PeerId) -> String { +func descriptionStringForMessage(_ message: Message, strings: PresentationStrings, accountPeerId: PeerId) -> (String, Bool) { if !message.text.isEmpty { - return message.text + return (message.text, false) } switch messageContentKind(message, strings: strings, 
accountPeerId: accountPeerId) { case let .text(text): - return text + return (text, false) case .image: - return strings.Message_Photo + return (strings.Message_Photo, true) case .video: - return strings.Message_Video + return (strings.Message_Video, true) case .videoMessage: - return strings.Message_VideoMessage + return (strings.Message_VideoMessage, true) case .audioMessage: - return strings.Message_Audio + return (strings.Message_Audio, true) case let .sticker(text): if text.isEmpty { - return strings.Message_Sticker + return (strings.Message_Sticker, true) } else { - return "\(text) \(strings.Message_Sticker)" + return ("\(text) \(strings.Message_Sticker)", true) } case .animation: - return strings.Message_Animation + return (strings.Message_Animation, true) case let .file(text): if text.isEmpty { - return strings.Message_File + return (strings.Message_File, true) } else { - return text + return (text, true) } case .contact: - return strings.Message_Contact + return (strings.Message_Contact, true) case let .game(text): - return text + return (text, true) case .location: - return strings.Message_Location + return (strings.Message_Location, true) case .liveLocation: - return strings.Message_LiveLocation + return (strings.Message_LiveLocation, true) } } diff --git a/TelegramUI/NativeVideoContent.swift b/TelegramUI/NativeVideoContent.swift index 70c1c83518..a7b437e80e 100644 --- a/TelegramUI/NativeVideoContent.swift +++ b/TelegramUI/NativeVideoContent.swift @@ -6,13 +6,13 @@ import Postbox import TelegramCore enum NativeVideoContentId: Hashable { - case message(MessageId, MediaId) + case message(MessageId, UInt32, MediaId) case instantPage(MediaId, MediaId) static func ==(lhs: NativeVideoContentId, rhs: NativeVideoContentId) -> Bool { switch lhs { - case let .message(messageId, mediaId): - if case .message(messageId, mediaId) = rhs { + case let .message(messageId, stableId, mediaId): + if case .message(messageId, stableId, mediaId) = rhs { return true } else { 
return false @@ -28,7 +28,7 @@ enum NativeVideoContentId: Hashable { var hashValue: Int { switch self { - case let .message(messageId, mediaId): + case let .message(messageId, _, mediaId): return messageId.hashValue &* 31 &+ mediaId.hashValue case let .instantPage(pageId, mediaId): return pageId.hashValue &* 31 &+ mediaId.hashValue @@ -38,25 +38,42 @@ enum NativeVideoContentId: Hashable { final class NativeVideoContent: UniversalVideoContent { let id: AnyHashable + let nativeId: NativeVideoContentId let file: TelegramMediaFile let dimensions: CGSize let duration: Int32 let streamVideo: Bool let loopVideo: Bool let enableSound: Bool + let fetchAutomatically: Bool - init(id: NativeVideoContentId, file: TelegramMediaFile, streamVideo: Bool = false, loopVideo: Bool = false, enableSound: Bool = true) { + init(id: NativeVideoContentId, file: TelegramMediaFile, streamVideo: Bool = false, loopVideo: Bool = false, enableSound: Bool = true, fetchAutomatically: Bool = true) { self.id = id + self.nativeId = id self.file = file self.dimensions = file.dimensions ?? CGSize(width: 128.0, height: 128.0) self.duration = file.duration ?? 0 self.streamVideo = streamVideo self.loopVideo = loopVideo self.enableSound = enableSound + self.fetchAutomatically = fetchAutomatically } func makeContentNode(postbox: Postbox, audioSession: ManagedAudioSession) -> UniversalVideoContentNode & ASDisplayNode { - return NativeVideoContentNode(postbox: postbox, audioSessionManager: audioSession, file: self.file, streamVideo: self.streamVideo, loopVideo: self.loopVideo, enableSound: self.enableSound) + return NativeVideoContentNode(postbox: postbox, audioSessionManager: audioSession, file: self.file, streamVideo: self.streamVideo, loopVideo: self.loopVideo, enableSound: self.enableSound, fetchAutomatically: self.fetchAutomatically) + } + + func isEqual(to other: UniversalVideoContent) -> Bool { + if let other = other as? 
NativeVideoContent { + if case let .message(_, stableId, _) = self.nativeId { + if case .message(_, stableId, _) = other.nativeId { + if self.file.isInstantVideo { + return true + } + } + } + } + return false } } @@ -91,13 +108,13 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent private var validLayout: CGSize? - init(postbox: Postbox, audioSessionManager: ManagedAudioSession, file: TelegramMediaFile, streamVideo: Bool, loopVideo: Bool, enableSound: Bool) { + init(postbox: Postbox, audioSessionManager: ManagedAudioSession, file: TelegramMediaFile, streamVideo: Bool, loopVideo: Bool, enableSound: Bool, fetchAutomatically: Bool) { self.postbox = postbox self.file = file self.imageNode = TransformImageNode() - self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: file.resource, streamable: streamVideo, video: true, preferSoftwareDecoding: false, playAutomatically: false, enableSound: enableSound) + self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: file.resource, streamable: streamVideo, video: true, preferSoftwareDecoding: false, playAutomatically: false, enableSound: enableSound, fetchAutomatically: fetchAutomatically) var actionAtEndImpl: (() -> Void)? 
if enableSound && !loopVideo { self.player.actionAtEnd = .action({ @@ -170,7 +187,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent if let dimensions = self.dimensions { let imageSize = CGSize(width: floor(dimensions.width / 2.0), height: floor(dimensions.height / 2.0)) let makeLayout = self.imageNode.asyncLayout() - let applyLayout = makeLayout(TransformImageArguments(corners: ImageCorners(), imageSize: imageSize, boundingSize: imageSize, intrinsicInsets: UIEdgeInsets())) + let applyLayout = makeLayout(TransformImageArguments(corners: ImageCorners(), imageSize: imageSize, boundingSize: imageSize, intrinsicInsets: UIEdgeInsets(), emptyColor: self.file.isInstantVideo ? .clear : .white)) applyLayout() } diff --git a/TelegramUI/NavigateToChatController.swift b/TelegramUI/NavigateToChatController.swift index 8a907be836..57204e4c11 100644 --- a/TelegramUI/NavigateToChatController.swift +++ b/TelegramUI/NavigateToChatController.swift @@ -3,7 +3,7 @@ import Display import TelegramCore import Postbox -public func navigateToChatController(navigationController: NavigationController, account: Account, chatLocation: ChatLocation, messageId: MessageId? = nil, animated: Bool = true) { +public func navigateToChatController(navigationController: NavigationController, chatController: ChatController? = nil, account: Account, chatLocation: ChatLocation, messageId: MessageId? = nil, botStart: ChatControllerInitialBotStart? 
= nil, animated: Bool = true) { var found = false var isFirst = true for controller in navigationController.viewControllers.reversed() { @@ -24,7 +24,13 @@ public func navigateToChatController(navigationController: NavigationController, } if !found { - navigationController.pushViewController(ChatController(account: account, chatLocation: chatLocation, messageId: messageId)) + let controller: ChatController + if let chatController = chatController { + controller = chatController + } else { + controller = ChatController(account: account, chatLocation: chatLocation, messageId: messageId, botStart: botStart) + } + navigationController.replaceAllButRootController(controller, animated: animated) } } diff --git a/TelegramUI/NetworkStatusTitleView.swift b/TelegramUI/NetworkStatusTitleView.swift index 5c6276097b..ef63f53099 100644 --- a/TelegramUI/NetworkStatusTitleView.swift +++ b/TelegramUI/NetworkStatusTitleView.swift @@ -12,27 +12,20 @@ struct NetworkStatusTitle: Equatable { } } -final class NetworkStatusTitleView: UIView, NavigationBarTitleTransitionNode { +final class NetworkStatusTitleView: UIView, NavigationBarTitleView, NavigationBarTitleTransitionNode { private let titleNode: ASTextNode private let lockView: ChatListTitleLockView private let activityIndicator: ActivityIndicator private let buttonView: HighlightTrackingButton + private var validLayout: (CGSize, CGRect)? 
+ var title: NetworkStatusTitle = NetworkStatusTitle(text: "", activity: false) { didSet { if self.title != oldValue { self.titleNode.attributedText = NSAttributedString(string: title.text, font: Font.bold(17.0), textColor: self.theme.rootController.navigationBar.primaryTextColor) - if self.title.activity != oldValue.activity { - if self.title.activity { - if self.activityIndicator.layer.superlayer == nil { - self.addSubnode(self.activityIndicator) - } - } else { - if self.activityIndicator.layer.superlayer != nil { - self.activityIndicator.removeFromSupernode() - } - } - } + self.activityIndicator.isHidden = !self.title.activity + self.setNeedsLayout() } } @@ -77,13 +70,14 @@ final class NetworkStatusTitleView: UIView, NavigationBarTitleTransitionNode { super.init(frame: CGRect()) + self.addSubnode(self.activityIndicator) self.addSubview(self.buttonView) self.addSubnode(self.titleNode) self.addSubview(self.lockView) self.buttonView.highligthedChanged = { [weak self] highlighted in if let strongSelf = self { - if highlighted && (strongSelf.activityIndicator.isHidden || strongSelf.activityIndicator.layer.superlayer == nil) { + if highlighted && !strongSelf.lockView.isHidden && strongSelf.activityIndicator.isHidden { strongSelf.titleNode.layer.removeAnimation(forKey: "opacity") strongSelf.lockView.layer.removeAnimation(forKey: "opacity") strongSelf.titleNode.alpha = 0.4 @@ -107,12 +101,18 @@ final class NetworkStatusTitleView: UIView, NavigationBarTitleTransitionNode { override func layoutSubviews() { super.layoutSubviews() - let size = self.bounds.size + if let (size, clearBounds) = self.validLayout { + self.updateLayout(size: size, clearBounds: clearBounds, transition: .immediate) + } + } + + func updateLayout(size: CGSize, clearBounds: CGRect, transition: ContainedViewLayoutTransition) { + self.validLayout = (size, clearBounds) var indicatorPadding: CGFloat = 0.0 let indicatorSize = self.activityIndicator.bounds.size - if self.activityIndicator.layer.superlayer != 
nil { + if !self.activityIndicator.isHidden { indicatorPadding = indicatorSize.width + 6.0 } @@ -127,9 +127,7 @@ final class NetworkStatusTitleView: UIView, NavigationBarTitleTransitionNode { self.lockView.frame = CGRect(x: titleFrame.maxX + 6.0, y: titleFrame.minY + 4.0, width: 2.0, height: 2.0) - if self.activityIndicator.layer.superlayer != nil { - self.activityIndicator.frame = CGRect(origin: CGPoint(x: titleFrame.minX - indicatorSize.width - 6.0, y: titleFrame.minY - 1.0), size: indicatorSize) - } + self.activityIndicator.frame = CGRect(origin: CGPoint(x: titleFrame.minX - indicatorSize.width - 6.0, y: titleFrame.minY - 1.0), size: indicatorSize) } func updatePasscode(isPasscodeSet: Bool, isManuallyLocked: Bool) { @@ -163,4 +161,7 @@ final class NetworkStatusTitleView: UIView, NavigationBarTitleTransitionNode { return view }, didLoad: nil) } + + func animateLayoutTransition() { + } } diff --git a/TelegramUI/NotificationContainerController.swift b/TelegramUI/NotificationContainerController.swift index cfb6b6ff0c..6db4142111 100644 --- a/TelegramUI/NotificationContainerController.swift +++ b/TelegramUI/NotificationContainerController.swift @@ -15,7 +15,7 @@ public final class NotificationContainerController: ViewController { public init(account: Account) { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .Ignore diff --git a/TelegramUI/NotificationItemContainerNode.swift b/TelegramUI/NotificationItemContainerNode.swift index 069ab5f2f8..497f87e53f 100644 --- a/TelegramUI/NotificationItemContainerNode.swift +++ b/TelegramUI/NotificationItemContainerNode.swift @@ -88,12 +88,15 @@ final class NotificationItemContainerNode: ASDisplayNode { if let statusBarHeight = layout.statusBarHeight, CGFloat(44.0).isLessThanOrEqualTo(statusBarHeight) { contentInsets.top += 34.0 } - let contentWidth = 
layout.size.width - contentInsets.left - contentInsets.right + + let containerWidth = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: layout.safeInsets.left) + + let contentWidth = containerWidth - 8.0 * 2.0 let contentHeight = contentNode.updateLayout(width: contentWidth, transition: transition) - transition.updateFrame(node: self.backgroundNode, frame: CGRect(origin: CGPoint(x: layout.safeInsets.left, y: contentInsets.top - 8.0), size: CGSize(width: layout.size.width - layout.safeInsets.left - layout.safeInsets.right, height: 8.0 + contentHeight + 20.0))) + transition.updateFrame(node: self.backgroundNode, frame: CGRect(origin: CGPoint(x: floor((layout.size.width - containerWidth - 8.0 * 2.0) / 2.0), y: contentInsets.top - 16.0), size: CGSize(width: containerWidth + 8.0 * 2.0, height: 8.0 + contentHeight + 20.0 + 8.0))) - transition.updateFrame(node: contentNode, frame: CGRect(origin: CGPoint(x: contentInsets.left, y: contentInsets.top), size: CGSize(width: contentWidth, height: contentHeight))) + transition.updateFrame(node: contentNode, frame: CGRect(origin: CGPoint(x: floor((layout.size.width - contentWidth) / 2.0), y: contentInsets.top), size: CGSize(width: contentWidth, height: contentHeight))) } } diff --git a/TelegramUI/NumericFormat.swift b/TelegramUI/NumericFormat.swift index 3181608f0d..62f92eb7f8 100644 --- a/TelegramUI/NumericFormat.swift +++ b/TelegramUI/NumericFormat.swift @@ -67,3 +67,11 @@ func unmuteIntervalString(strings: PresentationStrings, value: Int32) -> String return strings.MuteExpires_Days(max(1, value / (60 * 60 * 24))) } } + +func callDurationString(strings: PresentationStrings, value: Int32) -> String { + if value < 60 { + return strings.Call_Seconds(max(1, value)) + } else { + return strings.Call_Minutes(max(1, value / 60)) + } +} diff --git a/TelegramUI/OpenChatMessage.swift b/TelegramUI/OpenChatMessage.swift index ddf4120b1e..b57f94c97b 100644 --- a/TelegramUI/OpenChatMessage.swift +++ 
b/TelegramUI/OpenChatMessage.swift @@ -13,6 +13,7 @@ private enum ChatMessageGalleryControllerData { case map(TelegramMediaMap) case stickerPack(StickerPackReference) case audio(TelegramMediaFile) + case document(TelegramMediaFile) case gallery(GalleryController) case secretGallery(SecretMediaPreviewController) case other(Media) @@ -87,6 +88,22 @@ private func chatMessageGalleryControllerData(account: Account, message: Message } else if let file = galleryMedia as? TelegramMediaFile, file.mimeType == "application/vnd.apple.pkpass" || (file.fileName != nil && file.fileName!.lowercased().hasSuffix(".pkpass")) { return .pass(file) } else { + if let file = galleryMedia as? TelegramMediaFile { + if file.mimeType.hasPrefix("audio/") { + return .audio(file) + } + if let fileName = file.fileName { + let ext = (fileName as NSString).pathExtension.lowercased() + if ext == "wav" || ext == "opus" { + return .audio(file) + } + } + + if !file.isVideo, !internalDocumentItemSupportsMimeType(file.mimeType, fileName: file.fileName) { + return .document(file) + } + } + if message.containsSecretMedia { let gallery = SecretMediaPreviewController(account: account, messageId: message.id) return .secretGallery(gallery) @@ -143,6 +160,8 @@ func openChatMessage(account: Account, message: Message, standalone: Bool, rever if error == nil { let controller = PKAddPassesViewController(pass: pass) if let window = navigationController.view.window { + controller.popoverPresentationController?.sourceView = window + controller.popoverPresentationController?.sourceRect = CGRect(origin: CGPoint(x: window.bounds.width / 2.0, y: window.bounds.size.height - 1.0), size: CGSize(width: 1.0, height: 1.0)) window.rootViewController?.present(controller, animated: true) } } @@ -181,6 +200,9 @@ func openChatMessage(account: Account, message: Message, standalone: Bool, rever dismissInput() present(controller, nil) return true + case .document: + present(ShareController(account: account, subject: 
.messages([message]), saveToCameraRoll: false, showInChat: nil, externalShare: true, immediateExternalShare: true), nil) + return true case let .audio(file): let location: PeerMessagesPlaylistLocation let playerType: MediaManagerPlayerType diff --git a/TelegramUI/OverlayMediaController.swift b/TelegramUI/OverlayMediaController.swift index 91966309bd..82b802a014 100644 --- a/TelegramUI/OverlayMediaController.swift +++ b/TelegramUI/OverlayMediaController.swift @@ -10,7 +10,7 @@ public final class OverlayMediaController: ViewController { } public init() { - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .Ignore } diff --git a/TelegramUI/OverlayMediaControllerNode.swift b/TelegramUI/OverlayMediaControllerNode.swift index b28ed1b0c1..5386502918 100644 --- a/TelegramUI/OverlayMediaControllerNode.swift +++ b/TelegramUI/OverlayMediaControllerNode.swift @@ -262,6 +262,11 @@ final class OverlayMediaControllerNode: ASDisplayNode, UIGestureRecognizerDelega node.updateLayout(nodeSize) self.containerLayoutUpdated(validLayout, transition: .immediate) + + if !customTransition { + let positionX = CGRect(origin: self.nodePosition(layout: validLayout, size: nodeSize, location: location, hidden: true, isMinimized: false, tempExtendedTopInset: node.tempExtendedTopInset), size: nodeSize).center.x + node.layer.animatePosition(from: CGPoint(x: positionX - node.layer.position.x, y: 0.0), to: CGPoint(), duration: 0.3, timingFunction: kCAMediaTimingFunctionSpring, additive: true) + } } node.hasAttachedContextUpdated = { [weak self] _ in if let strongSelf = self, let validLayout = strongSelf.validLayout, !customTransition { diff --git a/TelegramUI/OverlayPlayerController.swift b/TelegramUI/OverlayPlayerController.swift index 1cdf89d566..3e27cc3742 100644 --- a/TelegramUI/OverlayPlayerController.swift +++ b/TelegramUI/OverlayPlayerController.swift @@ -27,7 +27,7 @@ final class OverlayPlayerController: ViewController { 
self.initialOrder = initialOrder self.parentNavigationController = parentNavigationController - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .Ignore diff --git a/TelegramUI/PeerMediaCollectionController.swift b/TelegramUI/PeerMediaCollectionController.swift index 310b2fbf33..2696c7f0fe 100644 --- a/TelegramUI/PeerMediaCollectionController.swift +++ b/TelegramUI/PeerMediaCollectionController.swift @@ -44,7 +44,7 @@ public class PeerMediaCollectionController: TelegramController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } self.interfaceState = PeerMediaCollectionInterfaceState(theme: self.presentationData.theme, strings: self.presentationData.strings) - super.init(account: account, navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme).withUpdatedSeparatorColor(self.presentationData.theme.rootController.navigationBar.backgroundColor), enableMediaAccessoryPanel: true, locationBroadcastPanelSource: .none) + super.init(account: account, navigationBarPresentationData: NavigationBarPresentationData(theme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme).withUpdatedSeparatorColor(self.presentationData.theme.rootController.navigationBar.backgroundColor), strings: NavigationBarStrings(presentationStrings: self.presentationData.strings)), enableMediaAccessoryPanel: true, locationBroadcastPanelSource: .none) self.title = self.presentationData.strings.SharedMedia_TitleAll diff --git a/TelegramUI/PeerMediaCollectionControllerNode.swift b/TelegramUI/PeerMediaCollectionControllerNode.swift index eec6bd6ba4..e8d2354315 100644 --- a/TelegramUI/PeerMediaCollectionControllerNode.swift +++ b/TelegramUI/PeerMediaCollectionControllerNode.swift @@ -214,7 +214,7 @@ class PeerMediaCollectionControllerNode: ASDisplayNode { if let selectionPanel = self.selectionPanel { selectionPanel.selectedMessages = 
selectionState.selectedIds - let panelHeight = selectionPanel.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: 0.0, transition: transition, interfaceState: interfaceState) + let panelHeight = selectionPanel.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: 0.0, transition: transition, interfaceState: interfaceState, metrics: layout.metrics) transition.updateFrame(node: selectionPanel, frame: CGRect(origin: CGPoint(x: 0.0, y: layout.size.height - insets.bottom - panelHeight), size: CGSize(width: layout.size.width, height: panelHeight))) if let selectionPanelSeparatorNode = self.selectionPanelSeparatorNode { transition.updateFrame(node: selectionPanelSeparatorNode, frame: CGRect(origin: CGPoint(x: 0.0, y: layout.size.height - insets.bottom - panelHeight), size: CGSize(width: layout.size.width, height: UIScreenPixel))) @@ -234,7 +234,7 @@ class PeerMediaCollectionControllerNode: ASDisplayNode { selectionPanel.backgroundColor = self.presentationData.theme.chat.inputPanel.panelBackgroundColor selectionPanel.interfaceInteraction = self.interfaceInteraction selectionPanel.selectedMessages = selectionState.selectedIds - let panelHeight = selectionPanel.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: 0.0, transition: .immediate, interfaceState: interfaceState) + let panelHeight = selectionPanel.updateLayout(width: layout.size.width, leftInset: layout.safeInsets.left, rightInset: layout.safeInsets.right, maxHeight: 0.0, transition: .immediate, interfaceState: interfaceState, metrics: layout.metrics) self.selectionPanel = selectionPanel self.addSubnode(selectionPanel) diff --git a/TelegramUI/PeerMessagesMediaPlaylist.swift b/TelegramUI/PeerMessagesMediaPlaylist.swift index 02695959c5..a40695aac7 100644 --- a/TelegramUI/PeerMessagesMediaPlaylist.swift +++ 
b/TelegramUI/PeerMessagesMediaPlaylist.swift @@ -74,6 +74,15 @@ final class MessageMediaPlaylistItem: SharedMediaPlaylistItem { break } } + if file.mimeType.hasPrefix("audio/") { + return SharedMediaPlaybackData(type: .music, source: .telegramFile(file)) + } + if let fileName = file.fileName { + let ext = (fileName as NSString).pathExtension.lowercased() + if ext == "wav" || ext == "opus" { + return SharedMediaPlaybackData(type: .music, source: .telegramFile(file)) + } + } } } return nil @@ -105,6 +114,8 @@ final class MessageMediaPlaylistItem: SharedMediaPlaylistItem { break } } + + return SharedMediaPlaybackDisplayData.music(title: file.fileName ?? "", performer: self.message.author?.displayTitle ?? "", albumArt: nil) } } return nil diff --git a/TelegramUI/PeerSelectionController.swift b/TelegramUI/PeerSelectionController.swift index df9b01d067..3cbe60e02e 100644 --- a/TelegramUI/PeerSelectionController.swift +++ b/TelegramUI/PeerSelectionController.swift @@ -28,7 +28,7 @@ public final class PeerSelectionController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style self.title = self.presentationData.strings.Conversation_ForwardTitle diff --git a/TelegramUI/PhotoResources.swift b/TelegramUI/PhotoResources.swift index a576946278..2dbcc94359 100644 --- a/TelegramUI/PhotoResources.swift +++ b/TelegramUI/PhotoResources.swift @@ -135,7 +135,8 @@ private let thumbnailGenerationMimeTypes: Set = Set([ "image/jpeg", "image/jpg", "image/png", - "image/gif" + "image/gif", + "image/heic" ]) private func chatMessageImageFileThumbnailDatas(account: Account, file: TelegramMediaFile, pathExtension: String? 
= nil, progressive: Bool = false) -> Signal<(Data?, String?, Bool), NoError> { @@ -566,11 +567,13 @@ func chatMessagePhoto(postbox: Postbox, photo: TelegramMediaImage) -> Signal<(Tr let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) var fullSizeImage: CGImage? + var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { if fullSizeComplete { let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } else { @@ -580,6 +583,7 @@ func chatMessagePhoto(postbox: Postbox, photo: TelegramMediaImage) -> Signal<(Tr let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -641,13 +645,13 @@ func chatMessagePhoto(postbox: Postbox, photo: TelegramMediaImage) -> Signal<(Tr c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } if let fullSizeImage = fullSizeImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } } @@ -723,11 +727,13 @@ func chatMessagePhotoThumbnail(account: Account, photo: TelegramMediaImage) -> S let 
fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) var fullSizeImage: CGImage? + var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { if fullSizeComplete { let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } else { @@ -737,6 +743,7 @@ func chatMessagePhotoThumbnail(account: Account, photo: TelegramMediaImage) -> S let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -771,13 +778,13 @@ func chatMessagePhotoThumbnail(account: Account, photo: TelegramMediaImage) -> S c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } if let fullSizeImage = fullSizeImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } @@ -807,11 +814,13 @@ func chatMessageVideoThumbnail(account: Account, file: TelegramMediaFile) -> Sig let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 
2.0), size: fittedSize) var fullSizeImage: CGImage? + var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData?.0 { if fullSizeComplete { let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } else { @@ -821,6 +830,7 @@ func chatMessageVideoThumbnail(account: Account, file: TelegramMediaFile) -> Sig let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -855,13 +865,13 @@ func chatMessageVideoThumbnail(account: Account, file: TelegramMediaFile) -> Sig c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } if let fullSizeImage = fullSizeImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } @@ -918,16 +928,7 @@ func chatSecretPhoto(account: Account, photo: TelegramMediaImage) -> Signal<(Tra blurredImage = thumbnailContext2.generateImage() } - }/* else { - let imageSource = CGImageSourceCreateIncremental(nil) - CGImageSourceUpdateData(imageSource, fullSizeData as CFData, fullSizeComplete) - - let options = NSMutableDictionary() - options[kCGImageSourceShouldCache as NSString] = false as NSNumber - if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, 
options as CFDictionary) { - fullSizeImage = image - } - }*/ + } } if blurredImage == nil { @@ -965,7 +966,7 @@ func chatSecretPhoto(account: Account, photo: TelegramMediaImage) -> Signal<(Tra c.setBlendMode(.copy) if let blurredImage = blurredImage, let cgImage = blurredImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: .up, in: fittedRect) } if !arguments.insets.left.isEqual(to: 0.0) { @@ -996,12 +997,14 @@ func mediaGridMessagePhoto(account: Account, photo: TelegramMediaImage) -> Signa let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) var fullSizeImage: CGImage? + var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { if fullSizeComplete { let options = NSMutableDictionary() options.setValue(max(fittedSize.width * context.scale, fittedSize.height * context.scale) as NSNumber, forKey: kCGImageSourceThumbnailMaxPixelSize as String) options.setValue(true as NSNumber, forKey: kCGImageSourceCreateThumbnailFromImageAlways as String) if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } else { @@ -1011,6 +1014,7 @@ func mediaGridMessagePhoto(account: Account, photo: TelegramMediaImage) -> Signa let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -1044,13 +1048,13 @@ func mediaGridMessagePhoto(account: Account, photo: TelegramMediaImage) -> Signa c.setBlendMode(.copy) if 
let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } if let fullSizeImage = fullSizeImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } @@ -1115,7 +1119,7 @@ func gifPaneVideoThumbnail(account: Account, video: TelegramMediaFile) -> Signal c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: .up, in: fittedRect) c.setBlendMode(.normal) } } @@ -1169,12 +1173,12 @@ func internalMediaGridMessageVideo(postbox: Postbox, video: TelegramMediaFile) - let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) var fullSizeImage: CGImage? 
+ var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { if fullSizeComplete { let options = NSMutableDictionary() - //options.setValue(max(fittedSize.width * context.scale, fittedSize.height * context.scale) as NSNumber, forKey: kCGImageSourceThumbnailMaxPixelSize as String) - //options.setValue(true as NSNumber, forKey: kCGImageSourceCreateThumbnailFromImageAlways as String) if let imageSource = CGImageSourceCreateWithData(fullSizeData.0 as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } else { @@ -1184,6 +1188,7 @@ func internalMediaGridMessageVideo(postbox: Postbox, video: TelegramMediaFile) - let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -1247,13 +1252,13 @@ func internalMediaGridMessageVideo(postbox: Postbox, video: TelegramMediaFile) - c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } if let fullSizeImage = fullSizeImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } @@ -1323,9 +1328,11 @@ func chatWebpageSnippetPhoto(account: Account, photo: TelegramMediaImage) -> Sig return signal |> map { fullSizeData in return { arguments in var fullSizeImage: CGImage? 
+ var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { let options = NSMutableDictionary() if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -1345,7 +1352,7 @@ func chatWebpageSnippetPhoto(account: Account, photo: TelegramMediaImage) -> Sig } c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } addCorners(context, arguments: arguments) @@ -1399,41 +1406,6 @@ func chatSecretMessageVideo(account: Account, video: TelegramMediaFile) -> Signa let fittedSize = arguments.imageSize.aspectFilled(arguments.boundingSize).fitted(arguments.imageSize) let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) - /*var fullSizeImage: CGImage? - if let fullSizeDataAndPath = fullSizeDataAndPath { - if fullSizeComplete { - if video.mimeType.hasPrefix("video/") { - let tempFilePath = NSTemporaryDirectory() + "\(arc4random()).mov" - - _ = try? FileManager.default.removeItem(atPath: tempFilePath) - _ = try? FileManager.default.linkItem(atPath: fullSizeDataAndPath.1, toPath: tempFilePath) - - let asset = AVAsset(url: URL(fileURLWithPath: tempFilePath)) - let imageGenerator = AVAssetImageGenerator(asset: asset) - imageGenerator.maximumSize = CGSize(width: 800.0, height: 800.0) - imageGenerator.appliesPreferredTrackTransform = true - if let image = try? 
imageGenerator.copyCGImage(at: CMTime(seconds: 0.0, preferredTimescale: asset.duration.timescale), actualTime: nil) { - fullSizeImage = image - } - } - /*let options: [NSString: NSObject] = [ - kCGImageSourceThumbnailMaxPixelSize: max(fittedSize.width * context.scale, fittedSize.height * context.scale), - kCGImageSourceCreateThumbnailFromImageAlways: true - ] - if let imageSource = CGImageSourceCreateWithData(fullSizeData, nil), image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options) { - fullSizeImage = image - }*/ - } else { - /*let imageSource = CGImageSourceCreateIncremental(nil) - CGImageSourceUpdateData(imageSource, fullSizeData as CFDataRef, fullSizeData.length >= fullTotalSize) - - var options: [NSString : NSObject!] = [:] - options[kCGImageSourceShouldCache as NSString] = false as NSNumber - if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionaryRef) { - fullSizeImage = image - }*/ - } - }*/ var blurredImage: UIImage? if blurredImage == nil { @@ -1464,14 +1436,13 @@ func chatSecretMessageVideo(account: Account, video: TelegramMediaFile) -> Signa context.withFlippedContext { c in c.setBlendMode(.copy) if arguments.imageSize.width < arguments.boundingSize.width || arguments.imageSize.height < arguments.boundingSize.height { - //c.setFillColor(UIColor(white: 0.0, alpha: 0.4).cgColor) c.fill(arguments.drawingRect) } c.setBlendMode(.copy) if let blurredImage = blurredImage, let cgImage = blurredImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: .up, in: fittedRect) } if !arguments.insets.left.isEqual(to: 0.0) { @@ -1489,6 +1460,40 @@ func chatSecretMessageVideo(account: Account, video: TelegramMediaFile) -> Signa } } +private func imageOrientationFromSource(_ source: CGImageSource) -> UIImageOrientation { + if let properties = CGImageSourceCopyPropertiesAtIndex(source, 0, nil) { + let dict = properties as NSDictionary + if let value = 
dict.object(forKey: "Orientation") as? NSNumber { + return UIImageOrientation(rawValue: value.intValue) ?? .up + } + } + + return .up +} + +private func drawImage(context: CGContext, image: CGImage, orientation: UIImageOrientation, in rect: CGRect) { + var restore = true + var drawRect = rect + switch orientation { + case .leftMirrored: + context.saveGState() + context.translateBy(x: rect.midX, y: rect.midY) + context.rotate(by: -CGFloat.pi / 2.0) + context.translateBy(x: -rect.midX, y: -rect.midY) + var t = CGAffineTransform(translationX: rect.midX, y: rect.midY) + t = t.rotated(by: -CGFloat.pi / 2.0) + t = t.translatedBy(x: -rect.midX, y: -rect.midY) + + drawRect = rect.applying(t) + default: + restore = false + } + context.draw(image, in: drawRect) + if restore { + context.restoreGState() + } +} + func chatMessageImageFile(account: Account, file: TelegramMediaFile, thumbnail: Bool) -> Signal<(TransformImageArguments) -> DrawingContext?, NoError> { let signal: Signal<(Data?, String?, Bool), NoError> if thumbnail { @@ -1511,12 +1516,14 @@ func chatMessageImageFile(account: Account, file: TelegramMediaFile, thumbnail: } var fullSizeImage: CGImage? 
+ var imageOrientation: UIImageOrientation = .up if let fullSizePath = fullSizePath { if fullSizeComplete { let options = NSMutableDictionary() options.setValue(max(fittedSize.width * context.scale, fittedSize.height * context.scale) as NSNumber, forKey: kCGImageSourceThumbnailMaxPixelSize as String) options.setValue(true as NSNumber, forKey: kCGImageSourceCreateThumbnailFromImageAlways as String) if let imageSource = CGImageSourceCreateWithURL(URL(fileURLWithPath: fullSizePath) as CFURL, nil), let image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image if thumbnail { fittedSize = CGSize(width: CGFloat(image.width), height: CGFloat(image.height)).aspectFilled(arguments.boundingSize) @@ -1558,13 +1565,13 @@ func chatMessageImageFile(account: Account, file: TelegramMediaFile, thumbnail: c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) } if let fullSizeImage = fullSizeImage { c.setBlendMode(.normal) c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } @@ -1654,11 +1661,13 @@ func chatAvatarGalleryPhoto(account: Account, representations: [TelegramMediaIma let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) var fullSizeImage: CGImage? 
+ var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { if fullSizeComplete { let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } else { @@ -1668,6 +1677,7 @@ func chatAvatarGalleryPhoto(account: Account, representations: [TelegramMediaIma let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } } @@ -1695,20 +1705,19 @@ func chatAvatarGalleryPhoto(account: Account, representations: [TelegramMediaIma context.withFlippedContext { c in c.setBlendMode(.copy) if arguments.imageSize.width < arguments.boundingSize.width || arguments.imageSize.height < arguments.boundingSize.height { - //c.setFillColor(UIColor(white: 0.0, alpha: 0.4).cgColor) c.fill(arguments.drawingRect) } c.setBlendMode(.copy) if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { c.interpolationQuality = .low - c.draw(cgImage, in: fittedRect) + drawImage(context: c, image: cgImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } if let fullSizeImage = fullSizeImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) } } @@ -1750,7 +1759,7 @@ func settingsBuiltinWallpaperImage(account: Account) -> Signal<(TransformImageAr c.setBlendMode(.copy) if let fullSizeImage = fullSizeImage.cgImage { c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + 
drawImage(context: c, image: fullSizeImage, orientation: .up, in: fittedRect) } } @@ -1787,10 +1796,12 @@ func chatMapSnapshotImage(account: Account, resource: MapSnapshotMediaResource) let context = DrawingContext(size: arguments.drawingSize, clear: true) var fullSizeImage: CGImage? + var imageOrientation: UIImageOrientation = .up if let fullSizeData = fullSizeData { let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } @@ -1815,7 +1826,7 @@ func chatMapSnapshotImage(account: Account, resource: MapSnapshotMediaResource) c.setBlendMode(.copy) c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) @@ -1852,10 +1863,12 @@ func chatWebFileImage(account: Account, file: TelegramMediaWebFile) -> Signal<(T let context = DrawingContext(size: arguments.drawingSize, clear: true) var fullSizeImage: CGImage? 
+ var imageOrientation: UIImageOrientation = .up if fullSizeData.complete { let options = NSMutableDictionary() options[kCGImageSourceShouldCache as NSString] = false as NSNumber if let imageSource = CGImageSourceCreateWithURL(URL(fileURLWithPath: fullSizeData.path) as CFURL, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + imageOrientation = imageOrientationFromSource(imageSource) fullSizeImage = image } @@ -1880,7 +1893,7 @@ func chatWebFileImage(account: Account, file: TelegramMediaWebFile) -> Signal<(T c.setBlendMode(.copy) c.interpolationQuality = .medium - c.draw(fullSizeImage, in: fittedRect) + drawImage(context: c, image: fullSizeImage, orientation: imageOrientation, in: fittedRect) c.setBlendMode(.normal) } diff --git a/TelegramUI/PresentationResourcesRootController.swift b/TelegramUI/PresentationResourcesRootController.swift index 4ceba9edaf..d80157dc92 100644 --- a/TelegramUI/PresentationResourcesRootController.swift +++ b/TelegramUI/PresentationResourcesRootController.swift @@ -167,12 +167,13 @@ struct PresentationResourcesRootController { static func inAppNotificationBackground(_ theme: PresentationTheme) -> UIImage? 
{ return theme.image(PresentationResourceKey.inAppNotificationBackground.rawValue, { theme in - return generateImage(CGSize(width: 30.0 + 8.0 * 2.0, height: 30.0 + 8.0 + 20.0), rotatedContext: { size, context in + let inset: CGFloat = 16.0 + return generateImage(CGSize(width: 30.0 + inset * 2.0, height: 30.0 + 8.0 * 2.0 + 20.0), rotatedContext: { size, context in context.clear(CGRect(origin: CGPoint(), size: size)) context.setShadow(offset: CGSize(width: 0.0, height: -4.0), blur: 40.0, color: UIColor(white: 0.0, alpha: 0.3).cgColor) context.setFillColor(theme.inAppNotification.fillColor.cgColor) - context.fillEllipse(in: CGRect(origin: CGPoint(x: 8.0, y: 8.0), size: CGSize(width: 30.0, height: 30.0))) - })?.stretchableImage(withLeftCapWidth: 8 + 15, topCapHeight: 8 + 15) + context.fillEllipse(in: CGRect(origin: CGPoint(x: inset, y: 8.0 * 2.0), size: CGSize(width: 30.0, height: 30.0))) + })?.stretchableImage(withLeftCapWidth: Int(inset) + 15, topCapHeight: 8 * 2 + 15) }) } diff --git a/TelegramUI/PresentationStrings.swift b/TelegramUI/PresentationStrings.swift index 55836758e4..461e214408 100644 --- a/TelegramUI/PresentationStrings.swift +++ b/TelegramUI/PresentationStrings.swift @@ -2288,7 +2288,7 @@ public final class PresentationStrings { private let _Checkout_LiabilityAlert: String private let _Checkout_LiabilityAlert_r: [(Int, NSRange)] public func Checkout_LiabilityAlert(_ _1: String, _ _2: String) -> (String, [(Int, NSRange)]) { - return formatWithArgumentRanges(_Checkout_LiabilityAlert, self._Checkout_LiabilityAlert_r, [_1, _1, _1, _2]) + return formatWithArgumentRanges(_Checkout_LiabilityAlert, self._Checkout_LiabilityAlert_r, [_1, _2]) } public let Channel_Info_BlackList: String public let Profile_BotInfo: String diff --git a/TelegramUI/ReplyAccessoryPanelNode.swift b/TelegramUI/ReplyAccessoryPanelNode.swift index 163aac3d51..50e7e3b8ce 100644 --- a/TelegramUI/ReplyAccessoryPanelNode.swift +++ b/TelegramUI/ReplyAccessoryPanelNode.swift @@ -67,7 +67,7 @@ 
final class ReplyAccessoryPanelNode: AccessoryPanelNode { authorName = author.displayTitle } if let message = message { - text = descriptionStringForMessage(message, strings: strings, accountPeerId: account.peerId) + (text, _) = descriptionStringForMessage(message, strings: strings, accountPeerId: account.peerId) } var updatedMedia: Media? diff --git a/TelegramUI/ScreenCaptureDetection.swift b/TelegramUI/ScreenCaptureDetection.swift new file mode 100644 index 0000000000..33d3767f40 --- /dev/null +++ b/TelegramUI/ScreenCaptureDetection.swift @@ -0,0 +1,75 @@ +import Foundation +import SwiftSignalKit + +enum ScreenCaptureEvent { + case still + case video +} + +private final class ScreenRecordingObserver: NSObject { + let f: (Bool) -> Void + + init(_ f: @escaping (Bool) -> Void) { + self.f = f + + super.init() + + UIScreen.main.addObserver(self, forKeyPath: "captured", options: [.new], context: nil) + } + + func clear() { + UIScreen.main.removeObserver(self, forKeyPath: "captured") + } + + override func observeValue(forKeyPath keyPath: String?, of object: Any?, change: [NSKeyValueChangeKey : Any]?, context: UnsafeMutableRawPointer?) { + if keyPath == "captured" { + if let value = change?[.newKey] as? 
Bool { + self.f(value) + } + } + } +} + +private func screenRecordingActive() -> Signal { + return Signal { subscriber in + if #available(iOSApplicationExtension 11.0, *) { + subscriber.putNext(UIScreen.main.isCaptured) + let observer = ScreenRecordingObserver({ value in + subscriber.putNext(value) + }) + return ActionDisposable { + Queue.mainQueue().async { + observer.clear() + } + } + } else { + subscriber.putNext(false) + return EmptyDisposable + } + } |> runOn(Queue.mainQueue()) +} + +func screenCaptureEvents() -> Signal { + return Signal { susbcriber in + let observer = NotificationCenter.default.addObserver(forName: NSNotification.Name.UIApplicationUserDidTakeScreenshot, object: nil, queue: .main, using: { _ in + susbcriber.putNext(.still) + }) + + var previous = false + let screenRecordingDisposable = screenRecordingActive().start(next: { value in + if value != previous { + previous = value + if value { + susbcriber.putNext(.video) + } + } + }) + + return ActionDisposable { + Queue.mainQueue().async { + NotificationCenter.default.removeObserver(observer) + screenRecordingDisposable.dispose() + } + } + } |> runOn(Queue.mainQueue()) +} diff --git a/TelegramUI/SearchBarNode.swift b/TelegramUI/SearchBarNode.swift index fa6e6280af..af5d6ecb8d 100644 --- a/TelegramUI/SearchBarNode.swift +++ b/TelegramUI/SearchBarNode.swift @@ -112,6 +112,7 @@ class SearchBarNode: ASDisplayNode, UITextFieldDelegate { private let backgroundNode: ASDisplayNode private let separatorNode: ASDisplayNode private let textBackgroundNode: ASImageNode + private var activityIndicator: ActivityIndicator? 
private let iconNode: ASImageNode private let textField: SearchBarTextField private let clearButton: HighlightableButtonNode @@ -155,6 +156,29 @@ class SearchBarNode: ASDisplayNode, UITextFieldDelegate { } } + var activity: Bool = false { + didSet { + if self.activity != oldValue { + if self.activity { + if self.activityIndicator == nil { + let activityIndicator = ActivityIndicator(type: .custom(self.theme.rootController.activeNavigationSearchBar.inputIconColor, 13.0, 1.0)) + self.activityIndicator = activityIndicator + self.addSubnode(activityIndicator) + if let (boundingSize, leftInset, rightInset) = self.validLayout { + self.updateLayout(boundingSize: boundingSize, leftInset: leftInset, rightInset: rightInset, transition: .immediate) + } + } + } else if let activityIndicator = self.activityIndicator { + self.activityIndicator = nil + activityIndicator.removeFromSupernode() + } + self.iconNode.isHidden = self.activity + } + } + } + + private var validLayout: (CGSize, CGFloat, CGFloat)? 
+ private var theme: PresentationTheme private var strings: PresentationStrings @@ -243,6 +267,8 @@ class SearchBarNode: ASDisplayNode, UITextFieldDelegate { } func updateLayout(boundingSize: CGSize, leftInset: CGFloat, rightInset: CGFloat, transition: ContainedViewLayoutTransition) { + self.validLayout = (boundingSize, leftInset, rightInset) + self.backgroundNode.frame = self.bounds transition.updateFrame(node: self.separatorNode, frame: CGRect(origin: CGPoint(x: 0.0, y: self.bounds.size.height), size: CGSize(width: self.bounds.size.width, height: UIScreenPixel))) @@ -263,6 +289,11 @@ class SearchBarNode: ASDisplayNode, UITextFieldDelegate { transition.updateFrame(node: self.iconNode, frame: CGRect(origin: CGPoint(x: textBackgroundFrame.minX + 8.0, y: textBackgroundFrame.minY + floor((textBackgroundFrame.size.height - iconSize.height) / 2.0)), size: iconSize)) } + if let activityIndicator = self.activityIndicator { + let indicatorSize = activityIndicator.measure(CGSize(width: 32.0, height: 32.0)) + transition.updateFrame(node: activityIndicator, frame: CGRect(origin: CGPoint(x: textBackgroundFrame.minX + 7.0, y: textBackgroundFrame.minY + floor((textBackgroundFrame.size.height - indicatorSize.height) / 2.0)), size: indicatorSize)) + } + let clearSize = self.clearButton.measure(CGSize(width: 100.0, height: 100.0)) transition.updateFrame(node: self.clearButton, frame: CGRect(origin: CGPoint(x: textBackgroundFrame.maxX - 8.0 - clearSize.width, y: textBackgroundFrame.minY + floor((textBackgroundFrame.size.height - clearSize.height) / 2.0)), size: clearSize)) diff --git a/TelegramUI/SearchDisplayController.swift b/TelegramUI/SearchDisplayController.swift index 2fadcf3518..076bf5bd69 100644 --- a/TelegramUI/SearchDisplayController.swift +++ b/TelegramUI/SearchDisplayController.swift @@ -11,6 +11,8 @@ final class SearchDisplayController { private(set) var isDeactivating = false + private var isSearchingDisposable: Disposable? 
+ init(theme: PresentationTheme, strings: PresentationStrings, contentNode: SearchDisplayControllerContentNode, cancel: @escaping () -> Void) { self.searchBar = SearchBarNode(theme: theme, strings: strings) self.contentNode = contentNode @@ -29,6 +31,11 @@ final class SearchDisplayController { self.contentNode.dismissInput = { [weak self] in self?.searchBar.deactivate(clear: false) } + + self.isSearchingDisposable = (contentNode.isSearching + |> deliverOnMainQueue).start(next: { [weak self] value in + self?.searchBar.activity = value + }) } func updateThemeAndStrings(theme: PresentationTheme, strings: PresentationStrings) { diff --git a/TelegramUI/SearchDisplayControllerContentNode.swift b/TelegramUI/SearchDisplayControllerContentNode.swift index 09282e3b2e..39003bd8d3 100644 --- a/TelegramUI/SearchDisplayControllerContentNode.swift +++ b/TelegramUI/SearchDisplayControllerContentNode.swift @@ -7,6 +7,10 @@ class SearchDisplayControllerContentNode: ASDisplayNode { final var dismissInput: (() -> Void)? final var cancel: (() -> Void)? 
+ var isSearching: Signal { + return .single(false) + } + override init() { super.init() } diff --git a/TelegramUI/SecretChatHandshakeStatusInputPanelNode.swift b/TelegramUI/SecretChatHandshakeStatusInputPanelNode.swift index 3e9724cd4f..740cba0190 100644 --- a/TelegramUI/SecretChatHandshakeStatusInputPanelNode.swift +++ b/TelegramUI/SecretChatHandshakeStatusInputPanelNode.swift @@ -39,7 +39,7 @@ final class SecretChatHandshakeStatusInputPanelNode: ChatInputPanelNode { self.interfaceInteraction?.unblockPeer() } - override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func updateLayout(width: CGFloat, leftInset: CGFloat, rightInset: CGFloat, maxHeight: CGFloat, transition: ContainedViewLayoutTransition, interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { if self.presentationInterfaceState != interfaceState { self.presentationInterfaceState = interfaceState @@ -69,7 +69,7 @@ final class SecretChatHandshakeStatusInputPanelNode: ChatInputPanelNode { return panelHeight } - override func minimalHeight(interfaceState: ChatPresentationInterfaceState) -> CGFloat { + override func minimalHeight(interfaceState: ChatPresentationInterfaceState, metrics: LayoutMetrics) -> CGFloat { return 47.0 } } diff --git a/TelegramUI/SecretChatKeyController.swift b/TelegramUI/SecretChatKeyController.swift index 042093d151..338d90a44f 100644 --- a/TelegramUI/SecretChatKeyController.swift +++ b/TelegramUI/SecretChatKeyController.swift @@ -22,7 +22,7 @@ final class SecretChatKeyController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: 
self.presentationData)) self.title = self.presentationData.strings.EncryptionKey_Title } diff --git a/TelegramUI/SecretMediaPreviewController.swift b/TelegramUI/SecretMediaPreviewController.swift index e0f378261c..f9f7cd65d9 100644 --- a/TelegramUI/SecretMediaPreviewController.swift +++ b/TelegramUI/SecretMediaPreviewController.swift @@ -128,11 +128,13 @@ public final class SecretMediaPreviewController: ViewController { private let presentationData: PresentationData + private var screenCaptureEventsDisposable: Disposable? + public init(account: Account, messageId: MessageId) { self.account = account self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: GalleryController.darkNavigationTheme) + super.init(navigationBarPresentationData: NavigationBarPresentationData(theme: GalleryController.darkNavigationTheme, strings: NavigationBarStrings(presentationStrings: self.presentationData.strings))) let backItem = UIBarButtonItem(backButtonAppearanceWithTitle: presentationData.strings.Common_Back, target: self, action: #selector(self.donePressed)) self.navigationItem.leftBarButtonItem = backItem @@ -155,6 +157,10 @@ public final class SecretMediaPreviewController: ViewController { return nil } }) + + self.screenCaptureEventsDisposable = screenCaptureEvents().start(next: { _ in + let _ = addSecretChatMessageScreenshot(account: account, peerId: messageId.peerId).start() + }) } required public init(coder aDecoder: NSCoder) { @@ -348,7 +354,17 @@ public final class SecretMediaPreviewController: ViewController { } private func applyMessageView() { - if let messageView = self.messageView, let message = messageView.message { + var message: Message? 
+ if let messageView = self.messageView, let m = messageView.message { + message = m + for media in m.media { + if media is TelegramMediaExpiredContent { + message = nil + break + } + } + } + if let message = message { if self.currentNodeMessageId != message.id { self.currentNodeMessageId = message.id guard let item = galleryItemForEntry(account: account, theme: self.presentationData.theme, strings: self.presentationData.strings, entry: .MessageEntry(message, false, nil, nil), streamVideos: false, hideControls: true, playbackCompleted: { [weak self] in diff --git a/TelegramUI/SettingsController.swift b/TelegramUI/SettingsController.swift index 85a387d5ca..bda3d6417d 100644 --- a/TelegramUI/SettingsController.swift +++ b/TelegramUI/SettingsController.swift @@ -369,6 +369,18 @@ public func settingsController(account: Account, accountManager: AccountManager) var changeProfilePhotoImpl: (() -> Void)? var openSavedMessagesImpl: (() -> Void)? + let openFaq: () -> Void = { + let presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } + var faqUrl = presentationData.strings.Settings_FAQ_URL + if faqUrl == "Settings.FAQ_URL" || faqUrl.isEmpty { + faqUrl = "http://telegram.org/faq#general" + } + + if let applicationContext = account.applicationContext as? 
TelegramApplicationContext { + applicationContext.applicationBindings.openUrl(faqUrl) + } + } + let arguments = SettingsItemArguments(account: account, accountManager: accountManager, avatarAndNameInfoContext: avatarAndNameInfoContext, avatarTapAction: { var updating = false updateState { @@ -418,21 +430,23 @@ public func settingsController(account: Account, accountManager: AccountManager) let controller = LanguageSelectionController(account: account) presentControllerImpl?(controller, nil) }, openSupport: { - supportPeerDisposable.set((supportPeerId(account: account) |> deliverOnMainQueue).start(next: { peerId in - if let peerId = peerId { - pushControllerImpl?(ChatController(account: account, chatLocation: .peer(peerId))) - } - })) - }, openFaq: { + let supportPeer = Promise() + supportPeer.set(supportPeerId(account: account)) let presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - var faqUrl = presentationData.strings.Settings_FAQ_URL - if faqUrl == "Settings.FAQ_URL" || faqUrl.isEmpty { - faqUrl = "http://telegram.org/faq#general" - } - - if let applicationContext = account.applicationContext as? 
TelegramApplicationContext { - applicationContext.applicationBindings.openUrl(faqUrl) - } + presentControllerImpl?(standardTextAlertController(theme: AlertControllerTheme(presentationTheme: presentationData.theme), title: nil, text: presentationData.strings.Settings_FAQ_Intro, actions: [ + TextAlertAction(type: .genericAction, title: presentationData.strings.Settings_FAQ_Button, action: { + openFaq() + }), + TextAlertAction(type: .defaultAction, title: presentationData.strings.Common_OK, action: { + supportPeerDisposable.set((supportPeer.get() |> take(1) |> deliverOnMainQueue).start(next: { peerId in + if let peerId = peerId { + pushControllerImpl?(ChatController(account: account, chatLocation: .peer(peerId))) + } + })) + }) + ]), nil) + }, openFaq: { + openFaq() }, openEditing: { let _ = (account.postbox.modify { modifier -> (Peer?, CachedPeerData?) in return (modifier.getPeer(account.peerId), modifier.getPeerCachedData(peerId: account.peerId)) diff --git a/TelegramUI/ShareController.swift b/TelegramUI/ShareController.swift index cdf0fef611..01da75da2c 100644 --- a/TelegramUI/ShareController.swift +++ b/TelegramUI/ShareController.swift @@ -17,6 +17,7 @@ public enum ShareControllerExternalStatus { public enum ShareControllerSubject { case url(String) + case text(String) case messages([Message]) case fromExternal(([PeerId], String) -> Signal) } @@ -173,7 +174,7 @@ public final class ShareController: ViewController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) switch subject { case let .url(text): @@ -181,6 +182,8 @@ public final class ShareController: ViewController { UIPasteboard.general.string = text self?.controllerNode.cancel?() }) + case .text: + break case let .messages(messages): if messages.count == 1, let message = messages.first { if saveToCameraRoll { @@ -256,6 +259,16 @@ public final class ShareController: 
ViewController { let _ = enqueueMessages(account: strongSelf.account, peerId: peerId, messages: messages).start() } return .complete() + case let .text(string): + for peerId in peerIds { + var messages: [EnqueueMessage] = [] + if !text.isEmpty { + messages.append(.message(text: text, attributes: [], media: nil, replyToMessageId: nil, localGroupingKey: nil)) + } + messages.append(.message(text: string, attributes: [], media: nil, replyToMessageId: nil, localGroupingKey: nil)) + let _ = enqueueMessages(account: strongSelf.account, peerId: peerId, messages: messages).start() + } + return .complete() case let .messages(messages): for peerId in peerIds { var messagesToEnqueue: [EnqueueMessage] = [] @@ -283,6 +296,8 @@ public final class ShareController: ViewController { switch strongSelf.subject { case let .url(text): collectableItems.append(CollectableExternalShareItem(url: text, text: "", media: nil)) + case let .text(string): + collectableItems.append(CollectableExternalShareItem(url: "", text: string, media: nil)) case let .messages(messages): for message in messages { var url: String? 
@@ -330,13 +345,16 @@ public final class ShareController: ViewController { activityItems.append(text as NSString) case let .image(image): activityItems.append(image) - case let .file(url, fileName, mimeType): + case let .file(url, _, _): activityItems.append(url) } } let activityController = UIActivityViewController(activityItems: activityItems, applicationActivities: nil) - if let window = strongSelf.view.window { - window.rootViewController?.present(activityController, animated: true, completion: nil) + + if let window = strongSelf.view.window, let rootViewController = window.rootViewController { + activityController.popoverPresentationController?.sourceView = window + activityController.popoverPresentationController?.sourceRect = CGRect(origin: CGPoint(x: window.bounds.width / 2.0, y: window.bounds.size.height - 1.0), size: CGSize(width: 1.0, height: 1.0)) + rootViewController.present(activityController, animated: true, completion: nil) } } return .done diff --git a/TelegramUI/ShareControllerNode.swift b/TelegramUI/ShareControllerNode.swift index e2b380652e..88a20c38e4 100644 --- a/TelegramUI/ShareControllerNode.swift +++ b/TelegramUI/ShareControllerNode.swift @@ -292,7 +292,8 @@ final class ShareControllerNode: ViewControllerTracingNode, UIScrollViewDelegate let maximumContentHeight = layout.size.height - insets.top - max(bottomInset + buttonHeight, insets.bottom) - sectionSpacing - let width = min(layout.size.width, layout.size.height) - 20.0 + let width = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: 10.0 + layout.safeInsets.left) + let sideInset = floor((layout.size.width - width) / 2.0) let contentContainerFrame = CGRect(origin: CGPoint(x: sideInset, y: insets.top), size: CGSize(width: width, height: maximumContentHeight)) @@ -343,7 +344,7 @@ final class ShareControllerNode: ViewControllerTracingNode, UIScrollViewDelegate let buttonHeight: CGFloat = 57.0 let sectionSpacing: CGFloat = 8.0 - let width = min(layout.size.width, 
layout.size.height) - 20.0 + let width = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: 10.0 + layout.safeInsets.left) let sideInset = floor((layout.size.width - width) / 2.0) @@ -367,7 +368,7 @@ final class ShareControllerNode: ViewControllerTracingNode, UIScrollViewDelegate self.animateContentNodeOffsetFromBackgroundOffset = nil let offset = backgroundFrame.minY - animateContentNodeOffsetFromBackgroundOffset if let contentNode = self.contentNode { - transition.animatePositionAdditive(node: contentNode, offset: -offset) + transition.animatePositionAdditive(node: contentNode, offset: CGPoint(x: 0.0, y: -offset)) } if let previousContentNode = self.previousContentNode { transition.updatePosition(node: previousContentNode, position: previousContentNode.position.offsetBy(dx: 0.0, dy: offset)) diff --git a/TelegramUI/ShareSearchContainerNode.swift b/TelegramUI/ShareSearchContainerNode.swift index 3f6cbc2103..42d5fba821 100644 --- a/TelegramUI/ShareSearchContainerNode.swift +++ b/TelegramUI/ShareSearchContainerNode.swift @@ -468,7 +468,7 @@ final class ShareSearchContainerNode: ASDisplayNode, ShareContentContainerNode { let gridSize = CGSize(width: size.width, height: size.height - 5.0) self.recentGridNode.transaction(GridNodeTransaction(deleteItems: [], insertItems: [], updateItems: [], scrollToItem: nil, updateLayout: GridNodeUpdateLayout(layout: GridNodeLayout(size: gridSize, insets: UIEdgeInsets(top: gridTopInset, left: 6.0, bottom: bottomInset, right: 6.0), preloadSize: 80.0, type: .fixed(itemSize: CGSize(width: itemWidth, height: itemWidth + 25.0), lineSpacing: 0.0)), transition: transition), itemTransition: .immediate, stationaryItems: .none, updateFirstIndexInSectionOffset: nil), completion: { _ in }) - transition.animatePositionAdditive(node: self.recentGridNode, offset: offset) + transition.animatePositionAdditive(node: self.recentGridNode, offset: CGPoint(x: 0.0, y: offset)) } } @@ -481,7 +481,7 @@ final class ShareSearchContainerNode: 
ASDisplayNode, ShareContentContainerNode { let gridSize = CGSize(width: size.width, height: size.height - 5.0) self.contentGridNode.transaction(GridNodeTransaction(deleteItems: [], insertItems: [], updateItems: [], scrollToItem: nil, updateLayout: GridNodeUpdateLayout(layout: GridNodeLayout(size: gridSize, insets: UIEdgeInsets(top: gridTopInset, left: 6.0, bottom: bottomInset, right: 6.0), preloadSize: 80.0, type: .fixed(itemSize: CGSize(width: itemWidth, height: itemWidth + 25.0), lineSpacing: 0.0)), transition: transition), itemTransition: .immediate, stationaryItems: .none, updateFirstIndexInSectionOffset: nil), completion: { _ in }) - transition.animatePositionAdditive(node: self.contentGridNode, offset: -offset) + transition.animatePositionAdditive(node: self.contentGridNode, offset: CGPoint(x: 0.0, y: -offset)) } } diff --git a/TelegramUI/SharedMediaPlayer.swift b/TelegramUI/SharedMediaPlayer.swift index a82b12c89a..c2546daa51 100644 --- a/TelegramUI/SharedMediaPlayer.swift +++ b/TelegramUI/SharedMediaPlayer.swift @@ -424,13 +424,13 @@ final class SharedMediaPlayer { case .voice, .music: switch playbackData.source { case let .telegramFile(file): - strongSelf.playbackItem = .audio(MediaPlayer(audioSessionManager: strongSelf.audioSession, postbox: strongSelf.postbox, resource: file.resource, streamable: playbackData.type == .music, video: false, preferSoftwareDecoding: false, enableSound: true, playAndRecord: controlPlaybackWithProximity)) + strongSelf.playbackItem = .audio(MediaPlayer(audioSessionManager: strongSelf.audioSession, postbox: strongSelf.postbox, resource: file.resource, streamable: playbackData.type == .music, video: false, preferSoftwareDecoding: false, enableSound: true, fetchAutomatically: true, playAndRecord: controlPlaybackWithProximity)) } case .instantVideo: if let mediaManager = strongSelf.mediaManager, let item = item as? 
MessageMediaPlaylistItem { switch playbackData.source { case let .telegramFile(file): - let videoNode = OverlayInstantVideoNode(postbox: strongSelf.postbox, audioSession: strongSelf.audioSession, manager: mediaManager.universalVideoManager, content: NativeVideoContent(id: .message(item.message.id, file.fileId), file: file, streamVideo: false, enableSound: false), close: { [weak mediaManager] in + let videoNode = OverlayInstantVideoNode(postbox: strongSelf.postbox, audioSession: strongSelf.audioSession, manager: mediaManager.universalVideoManager, content: NativeVideoContent(id: .message(item.message.id, item.message.stableId, file.fileId), file: file, streamVideo: false, enableSound: false), close: { [weak mediaManager] in mediaManager?.setPlaylist(nil, type: .voice) }) strongSelf.playbackItem = .instantVideo(videoNode) diff --git a/TelegramUI/StickerPackPreviewController.swift b/TelegramUI/StickerPackPreviewController.swift index c355430169..0c3f77d19e 100644 --- a/TelegramUI/StickerPackPreviewController.swift +++ b/TelegramUI/StickerPackPreviewController.swift @@ -13,6 +13,7 @@ final class StickerPackPreviewController: ViewController { private var animatedIn = false private let account: Account + private var presentationData: PresentationData private let stickerPack: StickerPackReference private let stickerPackDisposable = MetaDisposable() @@ -40,7 +41,9 @@ final class StickerPackPreviewController: ViewController { self.account = account self.stickerPack = stickerPack - super.init(navigationBarTheme: nil) + self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } + + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .Ignore @@ -75,7 +78,14 @@ final class StickerPackPreviewController: ViewController { } self.displayNodeDidLoad() self.stickerPackDisposable.set((self.stickerPackContents.get() |> deliverOnMainQueue).start(next: { [weak self] next in - self?.controllerNode.updateStickerPack(next) + if 
let strongSelf = self { + if case .none = next { + strongSelf.present(standardTextAlertController(theme: AlertControllerTheme(presentationTheme: strongSelf.presentationData.theme), title: nil, text: strongSelf.presentationData.strings.StickerPack_ErrorNotFound, actions: [TextAlertAction(type: .defaultAction, title: strongSelf.presentationData.strings.Common_OK, action: {})]), in: .window(.root)) + strongSelf.dismiss() + } else { + strongSelf.controllerNode.updateStickerPack(next) + } + } })) self.ready.set(self.controllerNode.ready.get()) } diff --git a/TelegramUI/StickerPackPreviewControllerNode.swift b/TelegramUI/StickerPackPreviewControllerNode.swift index 1f9e123916..4339e8b70b 100644 --- a/TelegramUI/StickerPackPreviewControllerNode.swift +++ b/TelegramUI/StickerPackPreviewControllerNode.swift @@ -24,7 +24,7 @@ final class StickerPackPreviewControllerNode: ViewControllerTracingNode, UIScrol private let contentTitleNode: ASTextNode private let contentSeparatorNode: ASDisplayNode - private var activityIndicatorView: UIActivityIndicatorView? + private var activityIndicator: ActivityIndicator? private var interaction: StickerPackPreviewInteraction! 
@@ -235,7 +235,7 @@ final class StickerPackPreviewControllerNode: ViewControllerTracingNode, UIScrol let sectionSpacing: CGFloat = 8.0 let titleAreaHeight: CGFloat = 51.0 - let width = min(layout.size.width, layout.size.height) - 20.0 + let width = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: 10.0 + layout.safeInsets.left) let sideInset = floor((layout.size.width - width) / 2.0) @@ -254,16 +254,15 @@ final class StickerPackPreviewControllerNode: ViewControllerTracingNode, UIScrol if let stickerPack = self.stickerPack { switch stickerPack { case .fetching, .none: - if self.activityIndicatorView == nil { - let activityIndicatorView = UIActivityIndicatorView(activityIndicatorStyle: .gray) - self.activityIndicatorView = activityIndicatorView - self.view.addSubview(activityIndicatorView) - activityIndicatorView.startAnimating() + if self.activityIndicator == nil { + let activityIndicator = ActivityIndicator(type: ActivityIndicatorType.custom(self.presentationData.theme.actionSheet.controlAccentColor, 50.0, 2.0)) + self.activityIndicator = activityIndicator + self.addSubnode(activityIndicator) } case let .result(info, items, _): - if let activityIndicatorView = self.activityIndicatorView { - activityIndicatorView.removeFromSuperview() - activityIndicatorView.stopAnimating() + if let activityIndicator = self.activityIndicator { + activityIndicator.removeFromSupernode() + self.activityIndicator = nil } itemCount = items.count if !self.didSetItems { @@ -299,8 +298,10 @@ final class StickerPackPreviewControllerNode: ViewControllerTracingNode, UIScrol transition.updateFrame(node: self.contentContainerNode, frame: contentContainerFrame) - if let activityIndicatorView = activityIndicatorView { - transition.updateFrame(layer: activityIndicatorView.layer, frame: CGRect(origin: CGPoint(x: contentFrame.minX + floor((contentFrame.width - activityIndicatorView.bounds.size.width) / 2.0), y: contentFrame.maxY - activityIndicatorView.bounds.size.height - 34.0), 
size: activityIndicatorView.bounds.size)) + if let activityIndicator = self.activityIndicator { + let indicatorSize = activityIndicator.calculateSizeThatFits(layout.size) + + transition.updateFrame(node: activityIndicator, frame: CGRect(origin: CGPoint(x: contentFrame.minX + floor((contentFrame.width - indicatorSize.width) / 2.0), y: contentFrame.maxY - indicatorSize.height - 20.0), size: indicatorSize)) } transition.updateFrame(node: self.installActionButtonNode, frame: CGRect(origin: CGPoint(x: 0.0, y: contentContainerFrame.size.height - buttonHeight), size: CGSize(width: contentContainerFrame.size.width, height: buttonHeight))) @@ -346,7 +347,7 @@ final class StickerPackPreviewControllerNode: ViewControllerTracingNode, UIScrol let sectionSpacing: CGFloat = 8.0 let titleAreaHeight: CGFloat = 51.0 - let width = min(layout.size.width, layout.size.height) - 20.0 + let width = horizontalContainerFillingSizeForLayout(layout: layout, sideInset: 10.0 + layout.safeInsets.left) let sideInset = floor((layout.size.width - width) / 2.0) diff --git a/TelegramUI/StickerPreviewController.swift b/TelegramUI/StickerPreviewController.swift index bd7d857273..366d5b6575 100644 --- a/TelegramUI/StickerPreviewController.swift +++ b/TelegramUI/StickerPreviewController.swift @@ -27,7 +27,7 @@ final class StickerPreviewController: ViewController { self.account = account self.item = item - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .Ignore } diff --git a/TelegramUI/TelegramController.swift b/TelegramUI/TelegramController.swift index 45aa4608c0..a9058cb316 100644 --- a/TelegramUI/TelegramController.swift +++ b/TelegramUI/TelegramController.swift @@ -51,6 +51,7 @@ public class TelegramController: ViewController { private var locationBroadcastMode: LocationBroadcastNavigationAccessoryPanelMode? private var locationBroadcastPeers: [Peer]? + private var locationBroadcastMessages: [MessageId: Message]? 
private var locationBroadcastAccessoryPanel: LocationBroadcastNavigationAccessoryPanel? private var dismissingPanel: ASDisplayNode? @@ -66,12 +67,12 @@ public class TelegramController: ViewController { return height } - init(account: Account, navigationBarTheme: NavigationBarTheme?, enableMediaAccessoryPanel: Bool, locationBroadcastPanelSource: LocationBroadcastPanelSource) { + init(account: Account, navigationBarPresentationData: NavigationBarPresentationData?, enableMediaAccessoryPanel: Bool, locationBroadcastPanelSource: LocationBroadcastPanelSource) { self.account = account self.enableMediaAccessoryPanel = enableMediaAccessoryPanel self.locationBroadcastPanelSource = locationBroadcastPanelSource - super.init(navigationBarTheme: navigationBarTheme) + super.init(navigationBarPresentationData: navigationBarPresentationData) if enableMediaAccessoryPanel { self.mediaStatusDisposable = (account.telegramApplicationContext.mediaManager.globalMediaPlayerState @@ -102,20 +103,33 @@ public class TelegramController: ViewController { case .none: self.locationBroadcastMode = nil case .summary, .peer: - let signal: Signal<[Peer]?, NoError> + let signal: Signal<([Peer]?, [MessageId: Message]?), NoError> switch locationBroadcastPanelSource { case let .peer(peerId): self.locationBroadcastMode = .peer signal = liveLocationManager.summaryManager.peersBroadcastingTo(peerId: peerId) + |> map { ($0, nil) } default: self.locationBroadcastMode = .summary - signal = liveLocationManager.summaryManager.broadcastingToPeers() - |> map { $0.isEmpty ? nil : $0 } + signal = liveLocationManager.summaryManager.broadcastingToMessages() + |> map { messages -> ([Peer]?, [MessageId: Message]?) 
in + if messages.isEmpty { + return (nil, nil) + } else { + var peers: [Peer] = [] + for message in messages.values.sorted(by: { MessageIndex($0) < MessageIndex($1) }) { + if let peer = message.peers[message.id.peerId] { + peers.append(peer) + } + } + return (peers, messages) + } + } } self.locationBroadcastDisposable = (signal - |> deliverOnMainQueue).start(next: { [weak self] peers in + |> deliverOnMainQueue).start(next: { [weak self] peers, messages in if let strongSelf = self { var updated = false if let current = strongSelf.locationBroadcastPeers, let peers = peers { @@ -124,6 +138,8 @@ public class TelegramController: ViewController { updated = true } + strongSelf.locationBroadcastMessages = messages + if updated { let wasEmpty = strongSelf.locationBroadcastPeers == nil strongSelf.locationBroadcastPeers = peers @@ -173,9 +189,11 @@ public class TelegramController: ViewController { case .none: break case .summary: - if let locationBroadcastPeers = strongSelf.locationBroadcastPeers { - if locationBroadcastPeers.count == 1 { - presentLiveLocationController(account: strongSelf.account, peerId: locationBroadcastPeers[0].id, controller: strongSelf) + if let locationBroadcastMessages = strongSelf.locationBroadcastMessages { + let messages = locationBroadcastMessages.values.sorted(by: { MessageIndex($0) > MessageIndex($1) }) + + if messages.count == 1 { + presentLiveLocationController(account: strongSelf.account, peerId: messages[0].id.peerId, controller: strongSelf) } else { let presentationData = strongSelf.account.telegramApplicationContext.currentPresentationData.with { $0 } let controller = ActionSheetController(presentationTheme: presentationData.theme) @@ -183,15 +201,26 @@ public class TelegramController: ViewController { controller?.dismissAnimated() } var items: [ActionSheetItem] = [] - if !locationBroadcastPeers.isEmpty { + if !messages.isEmpty { items.append(ActionSheetTextItem(title: 
presentationData.strings.LiveLocation_MenuChatsCount(Int32(locationBroadcastPeers.count)))) - for peer in locationBroadcastPeers { - items.append(ActionSheetButtonItem(title: peer.displayTitle, action: { - dismissAction() - if let strongSelf = self { - presentLiveLocationController(account: strongSelf.account, peerId: peer.id, controller: strongSelf) + for message in messages { + if let peer = message.peers[message.id.peerId] { + var beginTimeAndTimeout: (Double, Double)? + for media in message.media { + if let media = media as? TelegramMediaMap, let timeout = media.liveBroadcastingTimeout { + beginTimeAndTimeout = (Double(message.timestamp), Double(timeout)) + } } - })) + + if let beginTimeAndTimeout = beginTimeAndTimeout { + items.append(LocationBroadcastActionSheetItem(title: peer.displayTitle, beginTimestamp: beginTimeAndTimeout.0, timeout: beginTimeAndTimeout.1, strings: presentationData.strings, action: { + dismissAction() + if let strongSelf = self { + presentLiveLocationController(account: strongSelf.account, peerId: peer.id, controller: strongSelf) + } + })) + } + } } items.append(ActionSheetButtonItem(title: presentationData.strings.LiveLocation_MenuStopAll, color: .destructive, action: { dismissAction() diff --git a/TelegramUI/TelegramRootController.swift b/TelegramUI/TelegramRootController.swift index eb431a86cb..e7c96cbcc6 100644 --- a/TelegramUI/TelegramRootController.swift +++ b/TelegramUI/TelegramRootController.swift @@ -22,7 +22,7 @@ public final class TelegramRootController: NavigationController { self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init() + super.init(mode: .automaticMasterDetail, theme: NavigationControllerTheme(presentationTheme: self.presentationData.theme)) self.presentationDataDisposable = (account.telegramApplicationContext.presentationData |> deliverOnMainQueue).start(next: { [weak self] presentationData in @@ -30,7 +30,7 @@ public final class TelegramRootController: 
NavigationController { let previousTheme = strongSelf.presentationData.theme strongSelf.presentationData = presentationData if previousTheme !== presentationData.theme { - strongSelf.rootTabController?.updateTheme(navigationBarTheme: NavigationBarTheme(rootControllerTheme: presentationData.theme), theme: TabBarControllerTheme(rootControllerTheme: presentationData.theme)) + strongSelf.rootTabController?.updateTheme(navigationBarPresentationData: NavigationBarPresentationData(presentationData: presentationData), theme: TabBarControllerTheme(rootControllerTheme: presentationData.theme)) strongSelf.rootTabController?.statusBar.statusBarStyle = presentationData.theme.rootController.statusBar.style.style } } @@ -46,7 +46,7 @@ public final class TelegramRootController: NavigationController { } public func addRootControllers(showCallsTab: Bool) { - let tabBarController = TabBarController(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme), theme: TabBarControllerTheme(rootControllerTheme: self.presentationData.theme)) + let tabBarController = TabBarController(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData), theme: TabBarControllerTheme(rootControllerTheme: self.presentationData.theme)) let chatListController = ChatListController(account: self.account, groupId: nil, controlsHistoryPreload: true) let callListController = CallListController(account: self.account, mode: .tab) diff --git a/TelegramUI/TelegramUIPrivate/module.modulemap b/TelegramUI/TelegramUIPrivate/module.modulemap index e179011863..372c985175 100644 --- a/TelegramUI/TelegramUIPrivate/module.modulemap +++ b/TelegramUI/TelegramUIPrivate/module.modulemap @@ -1,10 +1,10 @@ module TelegramUIPrivateModule { - header "../../third-party/FFmpeg-iOS/include/libavcodec/avcodec.h" - header "../../third-party/FFmpeg-iOS/include/libavformat/avformat.h" - header "../../third-party/FFmpeg-iOS/include/libavformat/avio.h" - header 
"../../third-party/FFmpeg-iOS/include/libavutil/avutil.h" - header "../../third-party/FFmpeg-iOS/include/libavutil/pixdesc.h" - header "../../third-party/FFmpeg-iOS/include/libswresample/swresample.h" + private header "../../third-party/FFmpeg-iOS/include/libavcodec/avcodec.h" + private header "../../third-party/FFmpeg-iOS/include/libavformat/avformat.h" + private header "../../third-party/FFmpeg-iOS/include/libavformat/avio.h" + private header "../../third-party/FFmpeg-iOS/include/libavutil/avutil.h" + private header "../../third-party/FFmpeg-iOS/include/libavutil/pixdesc.h" + private header "../../third-party/FFmpeg-iOS/include/libswresample/swresample.h" header "../../third-party/opusenc/opusenc.h" header "../TGDataItem.h" header "../FFMpegSwResample.h" diff --git a/TelegramUI/TelegramVideoNode.swift b/TelegramUI/TelegramVideoNode.swift index c968ed240b..067441a22c 100644 --- a/TelegramUI/TelegramVideoNode.swift +++ b/TelegramUI/TelegramVideoNode.swift @@ -28,7 +28,7 @@ private final class SharedTelegramVideoContext: SharedVideoContext { private let playbackCompletedListeners = Bag<() -> Void>() init(audioSessionManager: ManagedAudioSession, postbox: Postbox, resource: MediaResource) { - self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: resource, streamable: false, video: true, preferSoftwareDecoding: false, enableSound: false) + self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, resource: resource, streamable: false, video: true, preferSoftwareDecoding: false, enableSound: false, fetchAutomatically: true) var actionAtEndImpl: (() -> Void)? 
self.player.actionAtEnd = .stop self.playerNode = MediaPlayerNode(backgroundThread: false) diff --git a/TelegramUI/ThemeGalleryController.swift b/TelegramUI/ThemeGalleryController.swift index 7a7ad58cf6..bf83344b25 100644 --- a/TelegramUI/ThemeGalleryController.swift +++ b/TelegramUI/ThemeGalleryController.swift @@ -65,7 +65,7 @@ class ThemeGalleryController: ViewController { init(account: Account, wallpapers: [TelegramWallpaper], at centralWallpaper: TelegramWallpaper) { self.account = account - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Done", style: .done, target: self, action: #selector(self.donePressed)) diff --git a/TelegramUI/ThemeGridController.swift b/TelegramUI/ThemeGridController.swift index 2f66519ac3..8a429ef4c3 100644 --- a/TelegramUI/ThemeGridController.swift +++ b/TelegramUI/ThemeGridController.swift @@ -24,7 +24,7 @@ final class ThemeGridController: ViewController { self.account = account self.presentationData = account.telegramApplicationContext.currentPresentationData.with { $0 } - super.init(navigationBarTheme: NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + super.init(navigationBarPresentationData: NavigationBarPresentationData(presentationData: self.presentationData)) self.title = self.presentationData.strings.Wallpaper_Title self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style @@ -60,7 +60,7 @@ final class ThemeGridController: ViewController { self.title = self.presentationData.strings.Wallpaper_Title self.statusBar.statusBarStyle = self.presentationData.theme.rootController.statusBar.style.style - self.navigationBar?.updateTheme(NavigationBarTheme(rootControllerTheme: self.presentationData.theme)) + self.navigationBar?.updatePresentationData(NavigationBarPresentationData(presentationData: self.presentationData)) if self.isNodeLoaded { 
self.controllerNode.updatePresentationData(self.presentationData) diff --git a/TelegramUI/ThemeSettingsController.swift b/TelegramUI/ThemeSettingsController.swift index 98699575c4..35b0394cff 100644 --- a/TelegramUI/ThemeSettingsController.swift +++ b/TelegramUI/ThemeSettingsController.swift @@ -254,7 +254,7 @@ private final class ThemeSettingsCrossfadeController: ViewController { init() { self.snapshotView = UIScreen.main.snapshotView(afterScreenUpdates: false) - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.statusBar.statusBarStyle = .Hide } diff --git a/TelegramUI/TransformImageArguments.swift b/TelegramUI/TransformImageArguments.swift index c95c1278e9..08061757b6 100644 --- a/TelegramUI/TransformImageArguments.swift +++ b/TelegramUI/TransformImageArguments.swift @@ -13,13 +13,15 @@ public struct TransformImageArguments: Equatable { public let boundingSize: CGSize public let intrinsicInsets: UIEdgeInsets public let resizeMode: TransformImageResizeMode + public let emptyColor: UIColor - public init(corners: ImageCorners, imageSize: CGSize, boundingSize: CGSize, intrinsicInsets: UIEdgeInsets, resizeMode: TransformImageResizeMode = .fill(.black)) { + public init(corners: ImageCorners, imageSize: CGSize, boundingSize: CGSize, intrinsicInsets: UIEdgeInsets, resizeMode: TransformImageResizeMode = .fill(.black), emptyColor: UIColor = .white) { self.corners = corners self.imageSize = imageSize self.boundingSize = boundingSize self.intrinsicInsets = intrinsicInsets self.resizeMode = resizeMode + self.emptyColor = emptyColor } public var drawingSize: CGSize { diff --git a/TelegramUI/UniversalVideoContentManager.swift b/TelegramUI/UniversalVideoContentManager.swift index 339d772b77..ef475c4a02 100644 --- a/TelegramUI/UniversalVideoContentManager.swift +++ b/TelegramUI/UniversalVideoContentManager.swift @@ -18,7 +18,8 @@ private final class UniversalVideoContentSubscriber { private final class UniversalVideoContentHolder { private 
var nextId: Int32 = 0 private var subscribers: [UniversalVideoContentSubscriber] = [] - let content: UniversalVideoContentNode & ASDisplayNode + let content: UniversalVideoContent + let contentNode: UniversalVideoContentNode & ASDisplayNode var statusDisposable: Disposable? var statusValue: MediaPlayerStatus? @@ -28,24 +29,25 @@ private final class UniversalVideoContentHolder { var playbackCompletedIndex: Int? - init(content: UniversalVideoContentNode & ASDisplayNode, statusUpdated: @escaping (MediaPlayerStatus?) -> Void, bufferingStatusUpdated: @escaping ((IndexSet, Int)?) -> Void, playbackCompleted: @escaping () -> Void) { + init(content: UniversalVideoContent, contentNode: UniversalVideoContentNode & ASDisplayNode, statusUpdated: @escaping (MediaPlayerStatus?) -> Void, bufferingStatusUpdated: @escaping ((IndexSet, Int)?) -> Void, playbackCompleted: @escaping () -> Void) { self.content = content + self.contentNode = contentNode - self.statusDisposable = (content.status |> deliverOnMainQueue).start(next: { [weak self] value in + self.statusDisposable = (contentNode.status |> deliverOnMainQueue).start(next: { [weak self] value in if let strongSelf = self { strongSelf.statusValue = value statusUpdated(value) } }) - self.bufferingStatusDisposable = (content.bufferingStatus |> deliverOnMainQueue).start(next: { [weak self] value in + self.bufferingStatusDisposable = (contentNode.bufferingStatus |> deliverOnMainQueue).start(next: { [weak self] value in if let strongSelf = self { strongSelf.bufferingStatusValue = value bufferingStatusUpdated(value) } }) - self.playbackCompletedIndex = content.addPlaybackCompleted { + self.playbackCompletedIndex = contentNode.addPlaybackCompleted { playbackCompleted() } } @@ -54,7 +56,7 @@ private final class UniversalVideoContentHolder { self.statusDisposable?.dispose() self.bufferingStatusDisposable?.dispose() if let playbackCompletedIndex = self.playbackCompletedIndex { - self.content.removePlaybackCompleted(playbackCompletedIndex) + 
self.contentNode.removePlaybackCompleted(playbackCompletedIndex) } } @@ -96,7 +98,7 @@ private final class UniversalVideoContentHolder { if i == self.subscribers.count - 1 { if !self.subscribers[i].active { self.subscribers[i].active = true - self.subscribers[i].update((self.content, initiatedCreation: initiatedCreation == self.subscribers[i].id)) + self.subscribers[i].update((self.contentNode, initiatedCreation: initiatedCreation == self.subscribers[i].id)) } } else { if self.subscribers[i].active { @@ -137,47 +139,58 @@ final class UniversalVideoContentManager { private var holders: [AnyHashable: UniversalVideoContentHolder] = [:] private var holderCallbacks: [AnyHashable: UniversalVideoContentHolderCallbacks] = [:] - func attachUniversalVideoContent(id: AnyHashable, priority: UniversalVideoPriority, create: () -> UniversalVideoContentNode & ASDisplayNode, update: @escaping (((UniversalVideoContentNode & ASDisplayNode), Bool)?) -> Void) -> Int32 { + func attachUniversalVideoContent(content: UniversalVideoContent, priority: UniversalVideoPriority, create: () -> UniversalVideoContentNode & ASDisplayNode, update: @escaping (((UniversalVideoContentNode & ASDisplayNode), Bool)?) -> Void) -> (AnyHashable, Int32) { assert(Queue.mainQueue().isCurrent()) var initiatedCreation = false let holder: UniversalVideoContentHolder - if let current = self.holders[id] { + if let current = self.holders[content.id] { holder = current } else { - initiatedCreation = true - holder = UniversalVideoContentHolder(content: create(), statusUpdated: { [weak self] value in - if let strongSelf = self { - if let current = strongSelf.holderCallbacks[id] { - for subscriber in current.status.copyItems() { - subscriber(value) + var foundHolder: UniversalVideoContentHolder? 
+ for (_, current) in self.holders { + if current.content.isEqual(to: content) { + //foundHolder = current + break + } + } + if let foundHolder = foundHolder { + holder = foundHolder + } else { + initiatedCreation = true + holder = UniversalVideoContentHolder(content: content, contentNode: create(), statusUpdated: { [weak self] value in + if let strongSelf = self { + if let current = strongSelf.holderCallbacks[content.id] { + for subscriber in current.status.copyItems() { + subscriber(value) + } } } - } - }, bufferingStatusUpdated: { [weak self] value in - if let strongSelf = self { - if let current = strongSelf.holderCallbacks[id] { - for subscriber in current.bufferingStatus.copyItems() { - subscriber(value) + }, bufferingStatusUpdated: { [weak self] value in + if let strongSelf = self { + if let current = strongSelf.holderCallbacks[content.id] { + for subscriber in current.bufferingStatus.copyItems() { + subscriber(value) + } } } - } - }, playbackCompleted: { [weak self] in - if let strongSelf = self { - if let current = strongSelf.holderCallbacks[id] { - for subscriber in current.playbackCompleted.copyItems() { - subscriber() + }, playbackCompleted: { [weak self] in + if let strongSelf = self { + if let current = strongSelf.holderCallbacks[content.id] { + for subscriber in current.playbackCompleted.copyItems() { + subscriber() + } } } - } - }) - self.holders[id] = holder + }) + self.holders[content.id] = holder + } } let id = holder.addSubscriber(priority: priority, update: update) holder.update(forceUpdateId: id, initiatedCreation: initiatedCreation ? 
id : nil) - return id + return (holder.content.id, id) } func detachUniversalVideoContent(id: AnyHashable, index: Int32) { @@ -186,7 +199,6 @@ final class UniversalVideoContentManager { if let holder = self.holders[id] { holder.removeSubscriberAndUpdate(id: index) if holder.isEmpty { - //holder.content.dispose() self.holders.removeValue(forKey: id) if let current = self.holderCallbacks[id] { @@ -200,7 +212,7 @@ final class UniversalVideoContentManager { func withUniversalVideoContent(id: AnyHashable, _ f: ((UniversalVideoContentNode & ASDisplayNode)?) -> Void) { if let holder = self.holders[id] { - f(holder.content) + f(holder.contentNode) } else { f(nil) } diff --git a/TelegramUI/UniversalVideoNode.swift b/TelegramUI/UniversalVideoNode.swift index 6d09ce5b45..b227858634 100644 --- a/TelegramUI/UniversalVideoNode.swift +++ b/TelegramUI/UniversalVideoNode.swift @@ -31,6 +31,14 @@ protocol UniversalVideoContent { var duration: Int32 { get } func makeContentNode(postbox: Postbox, audioSession: ManagedAudioSession) -> UniversalVideoContentNode & ASDisplayNode + + func isEqual(to other: UniversalVideoContent) -> Bool +} + +extension UniversalVideoContent { + func isEqual(to other: UniversalVideoContent) -> Bool { + return false + } } protocol UniversalVideoDecoration: class { @@ -80,7 +88,7 @@ final class UniversalVideoNode: ASDisplayNode { private var contentNodeId: Int32? private var playbackCompletedIndex: Int? - private var contentRequestIndex: Int32? + private var contentRequestIndex: (AnyHashable, Int32)? var playbackCompleted: (() -> Void)? 
@@ -111,7 +119,7 @@ final class UniversalVideoNode: ASDisplayNode { let content = self.content let postbox = self.postbox let audioSession = self.audioSession - self.contentRequestIndex = self.manager.attachUniversalVideoContent(id: self.content.id, priority: self.priority, create: { + self.contentRequestIndex = self.manager.attachUniversalVideoContent(content: self.content, priority: self.priority, create: { return content.makeContentNode(postbox: postbox, audioSession: audioSession) }, update: { [weak self] contentNodeAndFlags in if let strongSelf = self { @@ -120,9 +128,9 @@ final class UniversalVideoNode: ASDisplayNode { }) } else { assert(self.contentRequestIndex != nil) - if let contentRequestIndex = self.contentRequestIndex { + if let (id, index) = self.contentRequestIndex { self.contentRequestIndex = nil - self.manager.detachUniversalVideoContent(id: self.content.id, index: contentRequestIndex) + self.manager.detachUniversalVideoContent(id: id, index: index) } } } @@ -178,9 +186,9 @@ final class UniversalVideoNode: ASDisplayNode { self.manager.removePlaybackCompleted(id: self.content.id, index: playbackCompletedIndex) } - if let contentRequestIndex = self.contentRequestIndex { + if let (id, index) = self.contentRequestIndex { self.contentRequestIndex = nil - self.manager.detachUniversalVideoContent(id: self.content.id, index: contentRequestIndex) + self.manager.detachUniversalVideoContent(id: id, index: index) } } diff --git a/TelegramUI/UserInfoController.swift b/TelegramUI/UserInfoController.swift index 9ca676f00e..c61e03bdc9 100644 --- a/TelegramUI/UserInfoController.swift +++ b/TelegramUI/UserInfoController.swift @@ -433,11 +433,18 @@ private func userInfoEntries(account: Account, presentationData: PresentationDat let formattedNumber = formatPhoneNumber(phoneNumber) let normalizedNumber = DeviceContactNormalizedPhoneNumber(rawValue: formattedNumber) + var existingNumbers = Set() + var index = 0 var found = false for contact in deviceContacts { - for 
number in contact.phoneNumbers { + inner: for number in contact.phoneNumbers { var isMain = false + if !existingNumbers.contains(number.number.normalized) { + existingNumbers.insert(number.number.normalized) + } else { + continue inner + } if number.number.normalized == normalizedNumber { found = true isMain = true diff --git a/TelegramUI/UserInfoEditingPhoneItem.swift b/TelegramUI/UserInfoEditingPhoneItem.swift index 248e574ddd..7346bdae13 100644 --- a/TelegramUI/UserInfoEditingPhoneItem.swift +++ b/TelegramUI/UserInfoEditingPhoneItem.swift @@ -226,6 +226,9 @@ class UserInfoEditingPhoneItemNode: ItemListRevealOptionsItemNode { strongSelf.phoneNode.frame = phoneFrame strongSelf.phoneNode.updateLayout(size: phoneFrame.size) strongSelf.phoneNode.number = item.value + + strongSelf.updateLayout(size: layout.contentSize, leftInset: params.leftInset, rightInset: params.rightInset) + strongSelf.setRevealOptions([ItemListRevealOption(key: 0, title: item.strings.Common_Delete, icon: nil, color: item.theme.list.itemDisclosureActions.destructive.fillColor, textColor: item.theme.list.itemDisclosureActions.destructive.foregroundColor)]) } }) diff --git a/TelegramUI/UsernameSetupController.swift b/TelegramUI/UsernameSetupController.swift index 10a651157c..3733d6d4d8 100644 --- a/TelegramUI/UsernameSetupController.swift +++ b/TelegramUI/UsernameSetupController.swift @@ -21,7 +21,7 @@ private enum UsernameSetupSection: Int32 { private enum UsernameSetupEntry: ItemListNodeEntry { case editablePublicLink(PresentationTheme, String?, String) - case publicLinkStatus(PresentationTheme, String, AddressNameValidationStatus) + case publicLinkStatus(PresentationTheme, String, AddressNameValidationStatus, String) case publicLinkInfo(PresentationTheme, String) var section: ItemListSectionId { @@ -56,8 +56,8 @@ private enum UsernameSetupEntry: ItemListNodeEntry { } else { return false } - case let .publicLinkStatus(lhsTheme, lhsAddressName, lhsStatus): - if case let .publicLinkStatus(rhsTheme, 
rhsAddressName, rhsStatus) = rhs, lhsTheme === rhsTheme, lhsAddressName == rhsAddressName, lhsStatus == rhsStatus { + case let .publicLinkStatus(lhsTheme, lhsAddressName, lhsStatus, lhsText): + if case let .publicLinkStatus(rhsTheme, rhsAddressName, rhsStatus, rhsText) = rhs, lhsTheme === rhsTheme, lhsAddressName == rhsAddressName, lhsStatus == rhsStatus, lhsText == rhsText { return true } else { return false @@ -78,38 +78,25 @@ private enum UsernameSetupEntry: ItemListNodeEntry { }) case let .publicLinkInfo(theme, text): - return ItemListTextItem(theme: theme, text: .plain(text), sectionId: self.section) - case let .publicLinkStatus(theme, addressName, status): + return ItemListTextItem(theme: theme, text: .markdown(text), sectionId: self.section) + case let .publicLinkStatus(theme, _, status, text): var displayActivity = false - let text: NSAttributedString + let string: NSAttributedString switch status { - case let .invalidFormat(error): - switch error { - case .startsWithDigit: - text = NSAttributedString(string: "Names can't start with a digit.", textColor: UIColor(rgb: 0xcf3030)) - case .startsWithUnderscore: - text = NSAttributedString(string: "Names can't start with an underscore.", textColor: UIColor(rgb: 0xcf3030)) - case .endsWithUnderscore: - text = NSAttributedString(string: "Names can't end with an underscore.", textColor: UIColor(rgb: 0xcf3030)) - case .tooShort: - text = NSAttributedString(string: "Names must have at least 5 characters.", textColor: UIColor(rgb: 0xcf3030)) - case .invalidCharacters: - text = NSAttributedString(string: "Sorry, this name is invalid.", textColor: UIColor(rgb: 0xcf3030)) - } + case .invalidFormat: + string = NSAttributedString(string: text, textColor: theme.list.freeTextSuccessColor) case let .availability(availability): switch availability { - case .available: - text = NSAttributedString(string: "\(addressName) is available.", textColor: UIColor(rgb: 0x26972c)) - case .invalid: - text = NSAttributedString(string: 
"Sorry, this name is invalid.", textColor: UIColor(rgb: 0xcf3030)) - case .taken: - text = NSAttributedString(string: "\(addressName) is already taken.", textColor: UIColor(rgb: 0xcf3030)) + case .available: + string = NSAttributedString(string: text, textColor: theme.list.freeTextSuccessColor) + case .invalid, .taken: + string = NSAttributedString(string: text, textColor: theme.list.freeTextSuccessColor) } case .checking: - text = NSAttributedString(string: "Checking name...", textColor: UIColor(rgb: 0x6d6d72)) + string = NSAttributedString(string: text, textColor: theme.list.freeTextColor) displayActivity = true } - return ItemListActivityTextItem(displayActivity: displayActivity, theme: theme, text: text, sectionId: self.section) + return ItemListActivityTextItem(displayActivity: displayActivity, theme: theme, text: string, sectionId: self.section) } } } @@ -175,7 +162,30 @@ private func usernameSetupControllerEntries(presentationData: PresentationData, entries.append(.editablePublicLink(presentationData.theme, peer.addressName, currentAddressName)) if let status = state.addressNameValidationStatus { - entries.append(.publicLinkStatus(presentationData.theme, currentAddressName, status)) + let statusText: String + switch status { + case let .invalidFormat(error): + switch error { + case .startsWithDigit: + statusText = presentationData.strings.Username_InvalidStartsWithNumber + case .startsWithUnderscore, .endsWithUnderscore, .invalidCharacters: + statusText = presentationData.strings.Username_InvalidCharacters + case .tooShort: + statusText = presentationData.strings.Username_InvalidTooShort + } + case let .availability(availability): + switch availability { + case .available: + statusText = presentationData.strings.Username_UsernameIsAvailable(currentAddressName).0 + case .invalid: + statusText = presentationData.strings.Username_InvalidCharacters + case .taken: + statusText = presentationData.strings.Username_InvalidTaken + } + case .checking: + statusText = 
presentationData.strings.Username_CheckingUsername + } + entries.append(.publicLinkStatus(presentationData.theme, currentAddressName, status, statusText)) } entries.append(.publicLinkInfo(presentationData.theme, presentationData.strings.Username_Help)) } diff --git a/TelegramUI/VerticalListContextResultsChatInputPanelItem.swift b/TelegramUI/VerticalListContextResultsChatInputPanelItem.swift index c647e1bac6..10d7b35935 100644 --- a/TelegramUI/VerticalListContextResultsChatInputPanelItem.swift +++ b/TelegramUI/VerticalListContextResultsChatInputPanelItem.swift @@ -187,7 +187,7 @@ final class VerticalListContextResultsChatInputPanelItemNode: ListViewItemNode { } case let .internalReference(_, _, title, _, image, file, _): if let image = image { - imageResource = smallestImageRepresentation(image.representations)?.resource + imageResource = imageRepresentationLargerThan(image.representations, size: CGSize(width: 200.0, height: 200.0))?.resource } else if let file = file { imageResource = smallestImageRepresentation(file.previewRepresentations)?.resource } diff --git a/TelegramUI/WebController.swift b/TelegramUI/WebController.swift index ceee7f376a..7893c82120 100644 --- a/TelegramUI/WebController.swift +++ b/TelegramUI/WebController.swift @@ -12,7 +12,7 @@ final class WebController: ViewController { init(url: URL) { self.url = url - super.init(navigationBarTheme: nil) + super.init(navigationBarPresentationData: nil) self.edgesForExtendedLayout = [] } diff --git a/TelegramUI/ZoomableContentGalleryItemNode.swift b/TelegramUI/ZoomableContentGalleryItemNode.swift index 2e63b1e3c5..fb4ccb16bf 100644 --- a/TelegramUI/ZoomableContentGalleryItemNode.swift +++ b/TelegramUI/ZoomableContentGalleryItemNode.swift @@ -181,7 +181,7 @@ class ZoomableContentGalleryItemNode: GalleryItemNode, UIScrollViewDelegate { contentFrame.origin.x = 0.0 } - if boundsSize.height > contentFrame.size.height { + if boundsSize.height >= contentFrame.size.height { contentFrame.origin.y = 
(boundsSize.height - contentFrame.size.height) / 2.0 } else { contentFrame.origin.y = 0.0 @@ -190,8 +190,6 @@ class ZoomableContentGalleryItemNode: GalleryItemNode, UIScrollViewDelegate { if !self.ignoreZoom { transition.updateFrame(view: contentNode.view, frame: contentFrame) } - - //self.scrollView.scrollEnabled = ABS(_scrollView.zoomScale - _scrollView.normalZoomScale) > FLT_EPSILON; } func viewForZooming(in scrollView: UIScrollView) -> UIView? { diff --git a/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h b/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h index 6385220252..7e60333cb6 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h @@ -43,7 +43,9 @@ #include "version.h" /** - * @defgroup libavc Encoding/Decoding Library + * @defgroup libavc libavcodec + * Encoding/Decoding Library + * * @{ * * @defgroup lavc_decoding Decoding @@ -87,7 +89,7 @@ * - Send valid input: * - For decoding, call avcodec_send_packet() to give the decoder raw * compressed data in an AVPacket. - * - For encoding, call avcodec_send_frame() to give the decoder an AVFrame + * - For encoding, call avcodec_send_frame() to give the encoder an AVFrame * containing uncompressed audio or video. * In both cases, it is recommended that AVPackets and AVFrames are * refcounted, or libavcodec might have to copy the input data. (libavformat @@ -110,6 +112,12 @@ * are filled. This situation is handled transparently if you follow the steps * outlined above. * + * In theory, sending input can result in EAGAIN - this should happen only if + * not all output was received. You can use this to structure alternative decode + * or encode loops other than the one suggested above. For example, you could + * try sending new input on each iteration, and try to receive output if that + * returns EAGAIN. + * * End of stream situations. 
These require "flushing" (aka draining) the codec, * as the codec might buffer multiple frames or packets internally for * performance or out of necessity (consider B-frames). @@ -134,8 +142,9 @@ * * Not all codecs will follow a rigid and predictable dataflow; the only * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on - * one end implies that a receive/send call on the other end will succeed. In - * general, no codec will permit unlimited buffering of input or output. + * one end implies that a receive/send call on the other end will succeed, or + * at least will not fail with AVERROR(EAGAIN). In general, no codec will + * permit unlimited buffering of input or output. * * This API replaces the following legacy functions: * - avcodec_decode_video2() and avcodec_decode_audio4(): @@ -144,7 +153,8 @@ * Unlike with the old video decoding API, multiple frames might result from * a packet. For audio, splitting the input packet into frames by partially * decoding packets becomes transparent to the API user. You never need to - * feed an AVPacket to the API twice. + * feed an AVPacket to the API twice (unless it is rejected with AVERROR(EAGAIN) - then + * no data was read from the packet). * Additionally, sending a flush/draining packet is required only once. * - avcodec_encode_video2()/avcodec_encode_audio2(): * Use avcodec_send_frame() to feed input to the encoder, then use @@ -157,7 +167,22 @@ * and will result in undefined behavior. * * Some codecs might require using the new API; using the old API will return - * an error when calling it. + * an error when calling it. All codecs support the new API. + * + * A codec is not allowed to return AVERROR(EAGAIN) for both sending and receiving. This + * would be an invalid state, which could put the codec user into an endless + * loop. 
The API has no concept of time either: it cannot happen that trying to + * do avcodec_send_packet() results in AVERROR(EAGAIN), but a repeated call 1 second + * later accepts the packet (with no other receive/flush API calls involved). + * The API is a strict state machine, and the passage of time is not supposed + * to influence it. Some timing-dependent behavior might still be deemed + * acceptable in certain cases. But it must never result in both send/receive + * returning EAGAIN at the same time at any point. It must also absolutely be + * avoided that the current state is "unstable" and can "flip-flop" between + * the send/receive APIs allowing progress. For example, it's not allowed that + * the codec randomly decides that it actually wants to consume a packet now + * instead of returning a frame, after it just returned AVERROR(EAGAIN) on an + * avcodec_send_packet() call. * @} */ @@ -409,6 +434,20 @@ enum AVCodecID { AV_CODEC_ID_MAGICYUV, AV_CODEC_ID_SHEERVIDEO, AV_CODEC_ID_YLC, + AV_CODEC_ID_PSD, + AV_CODEC_ID_PIXLET, + AV_CODEC_ID_SPEEDHQ, + AV_CODEC_ID_FMVC, + AV_CODEC_ID_SCPR, + AV_CODEC_ID_CLEARVIDEO, + AV_CODEC_ID_XPM, + AV_CODEC_ID_AV1, + AV_CODEC_ID_BITPACKED, + AV_CODEC_ID_MSCC, + AV_CODEC_ID_SRGC, + AV_CODEC_ID_SVG, + AV_CODEC_ID_GDV, + AV_CODEC_ID_FITS, /* various PCM "codecs" */ AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs @@ -443,9 +482,11 @@ enum AVCodecID { AV_CODEC_ID_PCM_S24LE_PLANAR, AV_CODEC_ID_PCM_S32LE_PLANAR, AV_CODEC_ID_PCM_S16BE_PLANAR, - /* new PCM "codecs" should be added right below this line starting with - * an explicit value of for example 0x10800 - */ + + AV_CODEC_ID_PCM_S64LE = 0x10800, + AV_CODEC_ID_PCM_S64BE, + AV_CODEC_ID_PCM_F16LE, + AV_CODEC_ID_PCM_F24LE, /* various ADPCM codecs */ AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, @@ -509,6 +550,7 @@ enum AVCodecID { AV_CODEC_ID_SOL_DPCM, AV_CODEC_ID_SDX2_DPCM = 0x14800, + AV_CODEC_ID_GREMLIN_DPCM, /* audio codecs */ AV_CODEC_ID_MP2 = 
0x15000, @@ -596,6 +638,9 @@ enum AVCodecID { AV_CODEC_ID_XMA1, AV_CODEC_ID_XMA2, AV_CODEC_ID_DST, + AV_CODEC_ID_ATRAC3AL, + AV_CODEC_ID_ATRAC3PAL, + AV_CODEC_ID_DOLBY_E, /* subtitle codecs */ AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. @@ -629,6 +674,7 @@ enum AVCodecID { AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. AV_CODEC_ID_TTF = 0x18000, + AV_CODEC_ID_SCTE_35, ///< Contain timestamp estimated through PCR of program stream. AV_CODEC_ID_BINTEXT = 0x18800, AV_CODEC_ID_XBIN, AV_CODEC_ID_IDF, @@ -686,7 +732,7 @@ typedef struct AVCodecDescriptor { /** * Codec uses only intra compression. - * Video codecs only. + * Video and audio codecs only. */ #define AV_CODEC_PROP_INTRA_ONLY (1 << 0) /** @@ -1033,6 +1079,16 @@ typedef struct RcOverride{ * Audio encoder supports receiving a different number of samples in each call. */ #define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16) +/** + * Decoder is not a preferred choice for probing. + * This indicates that the decoder is not a good choice for probing. + * It could for example be an expensive to spin up hardware decoder, + * or it could simply not provide a lot of useful information about + * the stream. + * A decoder marked with this flag should only be used as last resort + * choice for probing. + */ +#define AV_CODEC_CAP_AVOID_PROBING (1 << 17) /** * Codec is intra only. */ @@ -1347,7 +1403,20 @@ typedef struct AVCPBProperties { * @{ */ enum AVPacketSideDataType { + /** + * An AV_PKT_DATA_PALETTE side data packet contains exactly AVPALETTE_SIZE + * bytes worth of palette. This side data signals that a new palette is + * present. + */ AV_PKT_DATA_PALETTE, + + /** + * The AV_PKT_DATA_NEW_EXTRADATA is used to notify the codec or the format + * that the extradata buffer was changed and the receiving side should + * act upon it appropriately. 
The new extradata is embedded in the side + * data buffer and should be immediately used for processing the current + * frame or packet. + */ AV_PKT_DATA_NEW_EXTRADATA, /** @@ -1512,10 +1581,40 @@ enum AVPacketSideDataType { /** * Mastering display metadata (based on SMPTE-2086:2014). This metadata - * should be associated with a video stream and containts data in the form + * should be associated with a video stream and contains data in the form * of the AVMasteringDisplayMetadata struct. */ - AV_PKT_DATA_MASTERING_DISPLAY_METADATA + AV_PKT_DATA_MASTERING_DISPLAY_METADATA, + + /** + * This side data should be associated with a video stream and corresponds + * to the AVSphericalMapping structure. + */ + AV_PKT_DATA_SPHERICAL, + + /** + * Content light level (based on CTA-861.3). This metadata should be + * associated with a video stream and contains data in the form of the + * AVContentLightMetadata struct. + */ + AV_PKT_DATA_CONTENT_LIGHT_LEVEL, + + /** + * ATSC A53 Part 4 Closed Captions. This metadata should be associated with + * a video stream. A53 CC bitstream is stored as uint8_t in AVPacketSideData.data. + * The number of bytes of CC data is AVPacketSideData.size. + */ + AV_PKT_DATA_A53_CC, + + /** + * The number of side data elements (in fact a bit more than it). + * This is not part of the public API/ABI in the sense that it may + * change when new side data types are added. + * This must stay the last enum value. + * If its value becomes huge, some code using it + * needs to be updated as it assumes it to be smaller than other limits. 
+ */ + AV_PKT_DATA_NB }; #define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED @@ -1611,6 +1710,19 @@ typedef struct AVPacket { } AVPacket; #define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe #define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted +/** + * Flag is used to discard packets which are required to maintain valid + * decoder state but are not required for output and should be dropped + * after decoding. + **/ +#define AV_PKT_FLAG_DISCARD 0x0004 +/** + * The packet comes from a trusted source. + * + * Otherwise-unsafe constructs such as arbitrary pointers to data + * outside the packet may be followed. + */ +#define AV_PKT_FLAG_TRUSTED 0x0008 enum AVSideDataParamChangeFlags { AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, @@ -1638,7 +1750,7 @@ enum AVFieldOrder { * New fields can be added to the end with minor version bumps. * Removal, reordering and changes to existing fields require a major * version bump. - * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * You can use AVOptions (av_opt* / av_set/get*()) to access these fields from user * applications. 
* The name string for AVOptions options matches the associated command line * parameter name and can be found in libavcodec/options_table.h @@ -2083,22 +2195,23 @@ typedef struct AVCodecContext { * - decoding: unused */ int ildct_cmp; -#define FF_CMP_SAD 0 -#define FF_CMP_SSE 1 -#define FF_CMP_SATD 2 -#define FF_CMP_DCT 3 -#define FF_CMP_PSNR 4 -#define FF_CMP_BIT 5 -#define FF_CMP_RD 6 -#define FF_CMP_ZERO 7 -#define FF_CMP_VSAD 8 -#define FF_CMP_VSSE 9 -#define FF_CMP_NSSE 10 -#define FF_CMP_W53 11 -#define FF_CMP_W97 12 -#define FF_CMP_DCTMAX 13 -#define FF_CMP_DCT264 14 -#define FF_CMP_CHROMA 256 +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_MEDIAN_SAD 15 +#define FF_CMP_CHROMA 256 /** * ME diamond size & shape @@ -2577,6 +2690,7 @@ typedef struct AVCodecContext { * - encoding: unused * - decoding: set by the caller before avcodec_open2(). */ + attribute_deprecated int refcounted_frames; /* - encoding parameters */ @@ -2850,6 +2964,7 @@ typedef struct AVCodecContext { #define FF_BUG_DC_CLIP 4096 #define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. #define FF_BUG_TRUNCATED 16384 +#define FF_BUG_IEDGE 32768 /** * strictly follow the standard (MPEG-4, ...). 
@@ -2907,8 +3022,8 @@ typedef struct AVCodecContext { #define FF_DEBUG_MMCO 0x00000800 #define FF_DEBUG_BUGS 0x00001000 #if FF_API_DEBUG_MV -#define FF_DEBUG_VIS_QP 0x00002000 ///< only access through AVOptions from outside libavcodec -#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec +#define FF_DEBUG_VIS_QP 0x00002000 +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 #endif #define FF_DEBUG_BUFFERS 0x00008000 #define FF_DEBUG_THREADS 0x00010000 @@ -2918,7 +3033,6 @@ typedef struct AVCodecContext { #if FF_API_DEBUG_MV /** * debug - * Code outside libavcodec should access this field using AVOptions * - encoding: Set by user. * - decoding: Set by user. */ @@ -3032,6 +3146,7 @@ typedef struct AVCodecContext { #if FF_API_ARCH_ALPHA #define FF_IDCT_SIMPLEALPHA 23 #endif +#define FF_IDCT_NONE 24 /* Used by XvMC to extract IDCT coefficients with FF_IDCT_PERM_NONE */ #define FF_IDCT_SIMPLEAUTO 128 /** @@ -3053,8 +3168,6 @@ typedef struct AVCodecContext { * low resolution decoding, 1-> 1/2 size, 2->1/4 size * - encoding: unused * - decoding: Set by user. 
- * Code outside libavcodec should access this field using: - * av_codec_{get,set}_lowres(avctx) */ int lowres; #endif @@ -3165,6 +3278,13 @@ typedef struct AVCodecContext { #define FF_PROFILE_MPEG2_AAC_LOW 128 #define FF_PROFILE_MPEG2_AAC_HE 131 +#define FF_PROFILE_DNXHD 0 +#define FF_PROFILE_DNXHR_LB 1 +#define FF_PROFILE_DNXHR_SQ 2 +#define FF_PROFILE_DNXHR_HQ 3 +#define FF_PROFILE_DNXHR_HQX 4 +#define FF_PROFILE_DNXHR_444 5 + #define FF_PROFILE_DTS 20 #define FF_PROFILE_DTS_ES 30 #define FF_PROFILE_DTS_96_24 40 @@ -3189,8 +3309,10 @@ typedef struct AVCodecContext { #define FF_PROFILE_H264_HIGH 100 #define FF_PROFILE_H264_HIGH_10 110 #define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_MULTIVIEW_HIGH 118 #define FF_PROFILE_H264_HIGH_422 122 #define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_STEREO_HIGH 128 #define FF_PROFILE_H264_HIGH_444 144 #define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 #define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) @@ -3346,8 +3468,6 @@ typedef struct AVCodecContext { /** * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_pkt_timebase(avctx) * - encoding unused. * - decoding set by user. */ @@ -3355,8 +3475,6 @@ typedef struct AVCodecContext { /** * AVCodecDescriptor - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_codec_descriptor(avctx) * - encoding: unused. * - decoding: set by libavcodec. */ @@ -3367,8 +3485,6 @@ typedef struct AVCodecContext { * low resolution decoding, 1-> 1/2 size, 2->1/4 size * - encoding: unused * - decoding: Set by user. - * Code outside libavcodec should access this field using: - * av_codec_{get,set}_lowres(avctx) */ int lowres; #endif @@ -3409,7 +3525,6 @@ typedef struct AVCodecContext { * However for formats that do not use pre-multiplied alpha * there might be serious artefacts (though e.g. 
libswscale currently * assumes pre-multiplied alpha anyway). - * Code outside libavcodec should access this field using AVOptions * * - decoding: set by user * - encoding: unused @@ -3426,7 +3541,6 @@ typedef struct AVCodecContext { #if !FF_API_DEBUG_MV /** * debug motion vectors - * Code outside libavcodec should access this field using AVOptions * - encoding: Set by user. * - decoding: Set by user. */ @@ -3438,7 +3552,6 @@ typedef struct AVCodecContext { /** * custom intra quantization matrix - * Code outside libavcodec should access this field using av_codec_g/set_chroma_intra_matrix() * - encoding: Set by user, can be NULL. * - decoding: unused. */ @@ -3447,8 +3560,6 @@ typedef struct AVCodecContext { /** * dump format separator. * can be ", " or "\n " or anything else - * Code outside libavcodec should access this field using AVOptions - * (NO direct access). * - encoding: Set by user. * - decoding: Set by user. */ @@ -3458,13 +3569,12 @@ typedef struct AVCodecContext { * ',' separated list of allowed decoders. * If NULL then all are allowed * - encoding: unused - * - decoding: set by user through AVOPtions (NO direct access) + * - decoding: set by user */ char *codec_whitelist; - /* + /** * Properties of the stream that gets decoded - * To be accessed through av_codec_get_properties() (NO direct access) * - encoding: unused * - decoding: set by libavcodec */ @@ -3482,15 +3592,26 @@ typedef struct AVCodecContext { int nb_coded_side_data; /** - * Encoding only. + * A reference to the AVHWFramesContext describing the input (for encoding) + * or output (decoding) frames. The reference is set by the caller and + * afterwards owned (and freed) by libavcodec - it should never be read by + * the caller after being set. * - * For hardware encoders configured to use a hwaccel pixel format, this - * field should be set by the caller to a reference to the AVHWFramesContext - * describing input frames. AVHWFramesContext.format must be equal to - * AVCodecContext.pix_fmt. 
+ * - decoding: This field should be set by the caller from the get_format() + * callback. The previous reference (if any) will always be + * unreffed by libavcodec before the get_format() call. * - * This field should be set before avcodec_open2() is called and is - * afterwards owned and managed by libavcodec. + * If the default get_buffer2() is used with a hwaccel pixel + * format, then this AVHWFramesContext will be used for + * allocating the frame buffers. + * + * - encoding: For hardware encoders configured to use a hwaccel pixel + * format, this field should be set by the caller to a reference + * to the AVHWFramesContext describing input frames. + * AVHWFramesContext.format must be equal to + * AVCodecContext.pix_fmt. + * + * This field should be set before avcodec_open2() is called. */ AVBufferRef *hw_frames_ctx; @@ -3505,6 +3626,82 @@ typedef struct AVCodecContext { #define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1 #endif + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. + * + * - decoding: unused + * - encoding: unused + */ + int trailing_padding; + + /** + * The number of pixels per image to maximally accept. + * + * - decoding: set by user + * - encoding: set by user + */ + int64_t max_pixels; + + /** + * A reference to the AVHWDeviceContext describing the device which will + * be used by a hardware encoder/decoder. The reference is set by the + * caller and afterwards owned (and freed) by libavcodec. + * + * This should be used if either the codec device does not require + * hardware frames or any that are used are to be allocated internally by + * libavcodec. If the user wishes to supply any of the frames used as + * encoder input or decoder output then hw_frames_ctx should be used + * instead. 
When hw_frames_ctx is set in get_format() for a decoder, this + * field will be ignored while decoding the associated stream segment, but + * may again be used on a following one after another get_format() call. + * + * For both encoders and decoders this field should be set before + * avcodec_open2() is called and must not be written to thereafter. + * + * Note that some decoders may require this field to be set initially in + * order to support hw_frames_ctx at all - in that case, all frames + * contexts used must be created on the same device. + */ + AVBufferRef *hw_device_ctx; + + /** + * Bit set of AV_HWACCEL_FLAG_* flags, which affect hardware accelerated + * decoding (if active). + * - encoding: unused + * - decoding: Set by user (either before avcodec_open2(), or in the + * AVCodecContext.get_format callback) + */ + int hwaccel_flags; + + /** + * Video decoding only. Certain video codecs support cropping, meaning that + * only a sub-rectangle of the decoded frame is intended for display. This + * option controls how cropping is handled by libavcodec. + * + * When set to 1 (the default), libavcodec will apply cropping internally. + * I.e. it will modify the output frame width/height fields and offset the + * data pointers (only by as much as possible while preserving alignment, or + * by the full amount if the AV_CODEC_FLAG_UNALIGNED flag is set) so that + * the frames output by the decoder refer only to the cropped area. The + * crop_* fields of the output frames will be zero. + * + * When set to 0, the width/height fields of the output frames will be set + * to the coded dimensions and the crop_* fields will describe the cropping + * rectangle. Applying the cropping is left to the caller. + * + * @warning When hardware acceleration with opaque output frames is used, + * libavcodec is unable to apply cropping from the top/left border. 
+ * + * @note when this option is set to zero, the width/height fields of the + * AVCodecContext and output AVFrames have different meanings. The codec + * context fields store display dimensions (with the coded dimensions in + * coded_width/height), while the frame fields store the coded dimensions + * (with the display dimensions being determined by the crop_* fields). + */ + int apply_cropping; } AVCodecContext; AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); @@ -3564,7 +3761,7 @@ typedef struct AVCodec { const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0 const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1 const uint64_t *channel_layouts; ///< array of support channel layouts, or NULL if unknown. array is terminated by 0 - uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres() + uint8_t max_lowres; ///< maximum value for lowres supported by the decoder const AVClass *priv_class; ///< AVClass for the private context const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN} @@ -3625,20 +3822,22 @@ typedef struct AVCodec { int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); int (*close)(AVCodecContext *); /** - * Decode/encode API with decoupled packet/frame dataflow. The API is the + * Encode API with decoupled packet/frame dataflow. 
The API is the * same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except * that: * - never called if the codec is closed or the wrong type, - * - AVPacket parameter change side data is applied right before calling - * AVCodec->send_packet, - * - if AV_CODEC_CAP_DELAY is not set, drain packets or frames are never sent, - * - only one drain packet is ever passed down (until the next flush()), - * - a drain AVPacket is always NULL (no need to check for avpkt->size). + * - if AV_CODEC_CAP_DELAY is not set, drain frames are never sent, + * - only one drain frame is ever passed down, */ int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame); - int (*send_packet)(AVCodecContext *avctx, const AVPacket *avpkt); - int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame); int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + + /** + * Decode API with decoupled packet/frame dataflow. This function is called + * to get one output frame. It should call ff_decode_get_packet() to obtain + * input data. + */ + int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame); /** * Flush buffers. * Will be called when seeking @@ -3649,6 +3848,12 @@ typedef struct AVCodec { * See FF_CODEC_CAP_* in internal.h */ int caps_internal; + + /** + * Decoding only, a comma-separated list of bitstream filters to apply to + * packets before decoding. + */ + const char *bsfs; } AVCodec; int av_codec_get_max_lowres(const AVCodec *codec); @@ -3690,7 +3895,7 @@ typedef struct AVHWAccel { /** * Hardware accelerated codec capabilities. - * see HWACCEL_CODEC_CAP_* + * see AV_HWACCEL_CODEC_CAP_* */ int capabilities; @@ -3761,7 +3966,7 @@ typedef struct AVHWAccel { /** * Called for every Macroblock in a slice. * - * XvMC uses it to replace the ff_mpv_decode_mb(). + * XvMC uses it to replace the ff_mpv_reconstruct_mb(). * Instead of decoding to raw picture, MB parameters are * stored in an array provided by the video driver. 
* @@ -3791,8 +3996,19 @@ typedef struct AVHWAccel { * AVCodecInternal.hwaccel_priv_data. */ int priv_data_size; + + /** + * Internal hwaccel capabilities. + */ + int caps_internal; } AVHWAccel; +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define AV_HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 + /** * Hardware acceleration should be used for decoding even if the codec level * used is unknown or higher than the maximum supported level reported by the @@ -3809,6 +4025,20 @@ typedef struct AVHWAccel { */ #define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1) +/** + * Hardware acceleration should still be attempted for decoding when the + * codec profile does not match the reported capabilities of the hardware. + * + * For example, this can be used to try to decode baseline profile H.264 + * streams in hardware - it will often succeed, because many streams marked + * as baseline profile actually conform to constrained baseline profile. + * + * @warning If the stream is actually not supported then the behaviour is + * undefined, and may include returning entirely incorrect output + * while indicating success. + */ +#define AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH (1 << 2) + /** * @} */ @@ -4318,13 +4548,13 @@ AVPacket *av_packet_alloc(void); * @see av_packet_alloc * @see av_packet_ref */ -AVPacket *av_packet_clone(AVPacket *src); +AVPacket *av_packet_clone(const AVPacket *src); /** * Free the packet, if the packet is reference counted, it will be * unreferenced first. * - * @param packet packet to be freed. The pointer will be set to NULL. + * @param pkt packet to be freed. The pointer will be set to NULL. * @note passing NULL is a no-op. 
*/ void av_packet_free(AVPacket **pkt); @@ -4393,14 +4623,20 @@ int av_dup_packet(AVPacket *pkt); * Copy packet, including contents * * @return 0 on success, negative AVERROR on fail + * + * @deprecated Use av_packet_ref */ +attribute_deprecated int av_copy_packet(AVPacket *dst, const AVPacket *src); /** * Copy packet side data * * @return 0 on success, negative AVERROR on fail + * + * @deprecated Use av_packet_copy_props */ +attribute_deprecated int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src); /** @@ -4459,12 +4695,16 @@ int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, * @param size pointer for side information size to store (optional) * @return pointer to data if present or NULL otherwise */ -uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, +uint8_t* av_packet_get_side_data(const AVPacket *pkt, enum AVPacketSideDataType type, int *size); +#if FF_API_MERGE_SD_API +attribute_deprecated int av_packet_merge_side_data(AVPacket *pkt); +attribute_deprecated int av_packet_split_side_data(AVPacket *pkt); +#endif const char *av_packet_side_data_name(enum AVPacketSideDataType type); @@ -4764,13 +5004,13 @@ int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, * and reusing a get_buffer written for video codecs would probably perform badly * due to a potentially very different allocation pattern. * - * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input * and output. This means that for some packets they will not immediately * produce decoded output and need to be flushed at the end of decoding to get * all the decoded data. Flushing is done by calling this function with packets * with avpkt->data set to NULL and avpkt->size set to 0 until it stops * returning subtitles. 
It is safe to flush even those decoders that are not - * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * marked with AV_CODEC_CAP_DELAY, then no subtitles will be returned. * * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() * before packets may be fed to the decoder. @@ -4824,8 +5064,10 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, * a flush packet. * * @return 0 on success, otherwise negative error code: - * AVERROR(EAGAIN): input is not accepted right now - the packet must be - * resent after trying to read output + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with avcodec_receive_frame() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). * AVERROR_EOF: the decoder has been flushed, and no new packets can * be sent to it (also returned if more than 1 flush * packet is sent) @@ -4846,7 +5088,7 @@ int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); * * @return * 0: success, a frame was returned - * AVERROR(EAGAIN): output is not available right now - user must try + * AVERROR(EAGAIN): output is not available in this state - user must try * to send new input * AVERROR_EOF: the decoder has been fully flushed, and there will be * no more output frames @@ -4879,8 +5121,10 @@ int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); * avctx->frame_size for all frames except the last. * The final frame may be smaller than avctx->frame_size. * @return 0 on success, otherwise negative error code: - * AVERROR(EAGAIN): input is not accepted right now - the frame must be - * resent after trying to read output packets + * AVERROR(EAGAIN): input is not accepted in the current state - user + * must read output with avcodec_receive_packet() (once + * all output is read, the packet should be resent, and + * the call will not fail with EAGAIN). 
* AVERROR_EOF: the encoder has been flushed, and no new frames can * be sent to it * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a @@ -4898,8 +5142,8 @@ int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); * encoder. Note that the function will always call * av_frame_unref(frame) before doing anything else. * @return 0 on success, otherwise negative error code: - * AVERROR(EAGAIN): output is not available right now - user must try - * to send input + * AVERROR(EAGAIN): output is not available in the current state - user + * must try to send input * AVERROR_EOF: the encoder has been fully flushed, and there will be * no more output packets * AVERROR(EINVAL): codec not opened, or it is an encoder @@ -5115,7 +5359,10 @@ AVCodecParserContext *av_parser_init(int codec_id); * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. * @param buf input buffer. - * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). + * @param buf_size buffer size in bytes without the padding. I.e. the full buffer + size is assumed to be buf_size + AV_INPUT_BUFFER_PADDING_SIZE. + To signal EOF, this should be 0 (so that the last frame + can be output). * @param pts input presentation timestamp. * @param dts input decoding timestamp. * @param pos input byte position in stream. @@ -5447,22 +5694,14 @@ int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, * @{ */ +#if FF_API_GETCHROMA /** - * Utility function to access log2_chroma_w log2_chroma_h from - * the pixel format AVPixFmtDescriptor. - * - * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample - * for one that returns a failure code and continues in case of invalid - * pix_fmts. 
- * - * @param[in] pix_fmt the pixel format - * @param[out] h_shift store log2_chroma_w - * @param[out] v_shift store log2_chroma_h - * - * @see av_pix_fmt_get_chroma_sub_sample + * @deprecated Use av_pix_fmt_get_chroma_sub_sample */ +attribute_deprecated void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); +#endif /** * Return a value representing the fourCC code associated to the @@ -5505,15 +5744,8 @@ enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); attribute_deprecated -#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI -enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list, - enum AVPixelFormat src_pix_fmt, - int has_alpha, int *loss_ptr); -#else enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); -#endif - enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); @@ -5529,6 +5761,7 @@ attribute_deprecated void avcodec_set_dimensions(AVCodecContext *s, int width, int height); #endif +#if FF_API_TAG_STRING /** * Put a string representing the codec tag codec_tag in buf. * @@ -5537,8 +5770,12 @@ void avcodec_set_dimensions(AVCodecContext *s, int width, int height); * @param codec_tag codec tag to assign * @return the length of the string that would have been generated if * enough space had been available, excluding the trailing null + * + * @deprecated see av_fourcc_make_string() and av_fourcc2str(). 
*/ +attribute_deprecated size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); +#endif void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); @@ -5651,7 +5888,7 @@ int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes); #if FF_API_OLD_BSF typedef struct AVBitStreamFilterContext { void *priv_data; - struct AVBitStreamFilter *filter; + const struct AVBitStreamFilter *filter; AVCodecParserContext *parser; struct AVBitStreamFilterContext *next; /** @@ -5698,12 +5935,15 @@ typedef struct AVBSFContext { void *priv_data; /** - * Parameters of the input stream. Set by the caller before av_bsf_init(). + * Parameters of the input stream. This field is allocated in + * av_bsf_alloc(), it needs to be filled by the caller before + * av_bsf_init(). */ AVCodecParameters *par_in; /** - * Parameters of the output stream. Set by the filter in av_bsf_init(). + * Parameters of the output stream. This field is allocated in + * av_bsf_alloc(), it is set by the filter in av_bsf_init(). */ AVCodecParameters *par_out; @@ -5931,6 +6171,91 @@ void av_bsf_free(AVBSFContext **ctx); */ const AVClass *av_bsf_get_class(void); +/** + * Structure for chain/list of bitstream filters. + * Empty list can be allocated by av_bsf_list_alloc(). + */ +typedef struct AVBSFList AVBSFList; + +/** + * Allocate empty list of bitstream filters. + * The list must be later freed by av_bsf_list_free() + * or finalized by av_bsf_list_finalize(). + * + * @return Pointer to @ref AVBSFList on success, NULL in case of failure + */ +AVBSFList *av_bsf_list_alloc(void); + +/** + * Free list of bitstream filters. + * + * @param lst Pointer to pointer returned by av_bsf_list_alloc() + */ +void av_bsf_list_free(AVBSFList **lst); + +/** + * Append bitstream filter to the list of bitstream filters. 
+ * + * @param lst List to append to + * @param bsf Filter context to be appended + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_append(AVBSFList *lst, AVBSFContext *bsf); + +/** + * Construct new bitstream filter context given its name and options + * and append it to the list of bitstream filters. + * + * @param lst List to append to + * @param bsf_name Name of the bitstream filter + * @param options Options for the bitstream filter, can be set to NULL + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_append2(AVBSFList *lst, const char * bsf_name, AVDictionary **options); +/** + * Finalize list of bitstream filters. + * + * This function will transform @ref AVBSFList to single @ref AVBSFContext, + * so the whole chain of bitstream filters can be treated as single filter + * freshly allocated by av_bsf_alloc(). + * If the call is successful, @ref AVBSFList structure is freed and lst + * will be set to NULL. In case of failure, caller is responsible for + * freeing the structure by av_bsf_list_free() + * + * @param lst Filter list structure to be transformed + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_finalize(AVBSFList **lst, AVBSFContext **bsf); + +/** + * Parse string describing list of bitstream filters and create single + * @ref AVBSFContext describing the whole chain of bitstream filters. + * Resulting @ref AVBSFContext can be treated as any other @ref AVBSFContext freshly + * allocated by av_bsf_alloc(). 
+ * + * @param str String describing chain of bitstream filters in format + * `bsf1[=opt1=val1:opt2=val2][,bsf2]` + * @param[out] bsf Pointer to be set to newly created @ref AVBSFContext structure + * representing the chain of bitstream filters + * + * @return >=0 on success, negative AVERROR in case of failure + */ +int av_bsf_list_parse_str(const char *str, AVBSFContext **bsf); + +/** + * Get null/pass-through bitstream filter. + * + * @param[out] bsf Pointer to be set to new instance of pass-through bitstream filter + * + * @return + */ +int av_bsf_get_null_filter(AVBSFContext **bsf); + /* memory */ /** diff --git a/third-party/FFmpeg-iOS/include/libavcodec/avdct.h b/third-party/FFmpeg-iOS/include/libavcodec/avdct.h index 59408f8e71..8ddf4a5512 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/avdct.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/avdct.h @@ -19,7 +19,7 @@ #ifndef AVCODEC_AVDCT_H #define AVCODEC_AVDCT_H -#include "../libavutilopt.h" +#include "../libavutil/opt.h" /** * AVDCT context. diff --git a/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h b/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h index 121cccfb03..266e24fcb5 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h @@ -21,8 +21,8 @@ #include -#include "../libavutilpixfmt.h" -#include "../libavutilrational.h" +#include "../libavutil/pixfmt.h" +#include "../libavutil/rational.h" #include "avcodec.h" /* minimum number of bytes to read from a DV stream in order to diff --git a/third-party/FFmpeg-iOS/include/libavcodec/mediacodec.h b/third-party/FFmpeg-iOS/include/libavcodec/mediacodec.h new file mode 100644 index 0000000000..87bcfeb2d6 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/mediacodec.h @@ -0,0 +1,88 @@ +/* + * Android MediaCodec public API + * + * Copyright (c) 2016 Matthieu Bouron + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_MEDIACODEC_H +#define AVCODEC_MEDIACODEC_H + +#include "../libavcodec/avcodec.h" + +/** + * This structure holds a reference to a android/view/Surface object that will + * be used as output by the decoder. + * + */ +typedef struct AVMediaCodecContext { + + /** + * android/view/Surface object reference. + */ + void *surface; + +} AVMediaCodecContext; + +/** + * Allocate and initialize a MediaCodec context. + * + * When decoding with MediaCodec is finished, the caller must free the + * MediaCodec context with av_mediacodec_default_free. + * + * @return a pointer to a newly allocated AVMediaCodecContext on success, NULL otherwise + */ +AVMediaCodecContext *av_mediacodec_alloc_context(void); + +/** + * Convenience function that sets up the MediaCodec context. + * + * @param avctx codec context + * @param ctx MediaCodec context to initialize + * @param surface reference to an android/view/Surface + * @return 0 on success, < 0 otherwise + */ +int av_mediacodec_default_init(AVCodecContext *avctx, AVMediaCodecContext *ctx, void *surface); + +/** + * This function must be called to free the MediaCodec context initialized with + * av_mediacodec_default_init(). 
+ * + * @param avctx codec context + */ +void av_mediacodec_default_free(AVCodecContext *avctx); + +/** + * Opaque structure representing a MediaCodec buffer to render. + */ +typedef struct MediaCodecBuffer AVMediaCodecBuffer; + +/** + * Release a MediaCodec buffer and render it to the surface that is associated + * with the decoder. This function should only be called once on a given + * buffer, once released the underlying buffer returns to the codec, thus + * subsequent calls to this function will have no effect. + * + * @param buffer the buffer to render + * @param render 1 to release and render the buffer to the surface or 0 to + * discard the buffer + * @return 0 on success, < 0 otherwise + */ +int av_mediacodec_release_buffer(AVMediaCodecBuffer *buffer, int render); + +#endif /* AVCODEC_MEDIACODEC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/qsv.h b/third-party/FFmpeg-iOS/include/libavcodec/qsv.h index 1fe9a54bed..227a79022d 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/qsv.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/qsv.h @@ -23,7 +23,7 @@ #include -#include "../libavutilbuffer.h" +#include "../libavutil/buffer.h" /** * This struct is used for communicating QSV parameters between libavcodec and diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h b/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h index 1a64817b51..0c1879e596 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h @@ -31,9 +31,11 @@ */ #include -#include "../libavutilattributes.h" +#include "../libavutil/attributes.h" #include "version.h" +#if FF_API_STRUCT_VAAPI_CONTEXT + /** * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding * @ingroup lavc_codec_hwaccel @@ -48,8 +50,10 @@ * during initialization or through each AVCodecContext.get_buffer() * function call. In any case, they must be valid prior to calling * decoding functions. 
+ * + * Deprecated: use AVCodecContext.hw_frames_ctx instead. */ -struct vaapi_context { +struct attribute_deprecated vaapi_context { /** * Window system dependent data * @@ -186,4 +190,6 @@ struct vaapi_context { /* @} */ +#endif /* FF_API_STRUCT_VAAPI_CONTEXT */ + #endif /* AVCODEC_VAAPI_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vda.h b/third-party/FFmpeg-iOS/include/libavcodec/vda.h index cb840b0021..275ff8386f 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/vda.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/vda.h @@ -29,7 +29,7 @@ * Public libavcodec VDA header. */ -#include "../libavcodecavcodec.h" +#include "../libavcodec/avcodec.h" #include @@ -41,7 +41,7 @@ #include #undef Picture -#include "../libavcodecversion.h" +#include "../libavcodec/version.h" // extra flags not defined in VDADecoder.h enum { diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h b/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h index 5f3dbdc328..afcda19a04 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h @@ -50,9 +50,9 @@ */ #include -#include -#include "../libavutilavconfig.h" -#include "../libavutilattributes.h" + +#include "../libavutil/avconfig.h" +#include "../libavutil/attributes.h" #include "avcodec.h" #include "version.h" diff --git a/third-party/FFmpeg-iOS/include/libavcodec/version.h b/third-party/FFmpeg-iOS/include/libavcodec/version.h index d57d3cb6ed..3c884f1db6 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/version.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/version.h @@ -28,8 +28,8 @@ #include "../libavutil/version.h" #define LIBAVCODEC_VERSION_MAJOR 57 -#define LIBAVCODEC_VERSION_MINOR 48 -#define LIBAVCODEC_VERSION_MICRO 101 +#define LIBAVCODEC_VERSION_MINOR 107 +#define LIBAVCODEC_VERSION_MICRO 100 #define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ LIBAVCODEC_VERSION_MINOR, \ @@ -60,9 +60,6 @@ #ifndef 
FF_API_AVCODEC_RESAMPLE #define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT #endif -#ifndef FF_API_GETCHROMA -#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 58) -#endif #ifndef FF_API_MISSING_SAMPLE #define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 58) #endif @@ -157,6 +154,9 @@ #ifndef FF_API_VAAPI_CONTEXT #define FF_API_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 58) #endif +#ifndef FF_API_MERGE_SD +#define FF_API_MERGE_SD (LIBAVCODEC_VERSION_MAJOR < 58) +#endif #ifndef FF_API_AVCTX_TIMEBASE #define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59) #endif @@ -226,5 +226,18 @@ #ifndef FF_API_NVENC_OLD_NAME #define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59) #endif +#ifndef FF_API_STRUCT_VAAPI_CONTEXT +#define FF_API_STRUCT_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MERGE_SD_API +#define FF_API_MERGE_SD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_TAG_STRING +#define FF_API_TAG_STRING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GETCHROMA +#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 59) +#endif + #endif /* AVCODEC_VERSION_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h b/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h index f3ffa8754d..d126d43c72 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h @@ -35,7 +35,7 @@ #include #undef Picture -#include "../libavcodecavcodec.h" +#include "../libavcodec/avcodec.h" /** * This struct holds all the information that needs to be passed @@ -58,7 +58,8 @@ typedef struct AVVideotoolboxContext { /** * CVPixelBuffer Format Type that Videotoolbox will use for decoded frames. - * set by the caller. + * set by the caller. If this is set to 0, then no specific format is + * requested from the decoder, and its native format is output. 
*/ OSType cv_pix_fmt_type; diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h b/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h index 92050277ed..789932ac49 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h @@ -32,9 +32,6 @@ typedef struct AVVorbisParseContext AVVorbisParseContext; /** * Allocate and initialize the Vorbis parser using headers in the extradata. - * - * @param avctx codec context - * @param s Vorbis parser context */ AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata, int extradata_size); diff --git a/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h b/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h index 5c0f7e5432..57a968059d 100644 --- a/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h +++ b/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h @@ -29,7 +29,7 @@ #include -#include "../libavutilattributes.h" +#include "../libavutil/attributes.h" #include "version.h" #include "avcodec.h" diff --git a/third-party/FFmpeg-iOS/include/libavformat/avformat.h b/third-party/FFmpeg-iOS/include/libavformat/avformat.h index e21ba8858a..23c82b477c 100644 --- a/third-party/FFmpeg-iOS/include/libavformat/avformat.h +++ b/third-party/FFmpeg-iOS/include/libavformat/avformat.h @@ -28,8 +28,8 @@ */ /** - * @defgroup libavf I/O and Muxing/Demuxing Library - * @{ + * @defgroup libavf libavformat + * I/O and Muxing/Demuxing Library * * Libavformat (lavf) is a library for dealing with various media container * formats. Its main two purposes are demuxing - i.e. splitting a media file @@ -90,6 +90,8 @@ * both local and remote files, parts of them, concatenations of them, local * audio and video devices and so on. * + * @{ + * * @defgroup lavf_decoding Demuxing * @{ * Demuxers read a media file and split it into chunks of data (@em packets). 
A @@ -616,6 +618,8 @@ typedef struct AVOutputFormat { * AVStream parameters that need to be set before packets are sent. * This method must not write output. * + * Return 0 if streams were fully configured, 1 if not, negative AVERROR on failure + * * Any allocations made here must be freed in deinit(). */ int (*init)(struct AVFormatContext *); @@ -812,6 +816,9 @@ typedef struct AVIndexEntry { * is known */ #define AVINDEX_KEYFRAME 0x0001 +#define AVINDEX_DISCARD_FRAME 0x0002 /** + * Flag is used to indicate which frame should be discarded after decoding. + */ int flags:2; int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ @@ -835,11 +842,17 @@ typedef struct AVIndexEntry { #define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ /** * The stream is stored in the file as an attached picture/"cover art" (e.g. - * APIC frame in ID3v2). The single packet associated with it will be returned - * among the first few packets read from the file unless seeking takes place. - * It can also be accessed at any time in AVStream.attached_pic. + * APIC frame in ID3v2). The first (usually only) packet associated with it + * will be returned among the first few packets read from the file unless + * seeking takes place. It can also be accessed at any time in + * AVStream.attached_pic. */ #define AV_DISPOSITION_ATTACHED_PIC 0x0400 +/** + * The stream is sparse, and contains thumbnail images, often corresponding + * to chapter markers. Only ever used with AV_DISPOSITION_ATTACHED_PIC. + */ +#define AV_DISPOSITION_TIMED_THUMBNAILS 0x0800 typedef struct AVStreamInternal AVStreamInternal; @@ -917,6 +930,9 @@ typedef struct AVStream { * Decoding: duration of the stream, in stream time base. 
* If a source file does not specify a duration, but does specify * a bitrate, this value will be estimated from bitrate and file size. + * + * Encoding: May be set by the caller before avformat_write_header() to + * provide a hint to the muxer about the estimated duration. */ int64_t duration; @@ -989,12 +1005,14 @@ typedef struct AVStream { * All fields below this line are not part of the public API. They * may not be used outside of libavformat and can be changed and * removed at will. - * New public fields should be added right above. + * Internal note: be aware that physically removing these fields + * will break ABI. Replace removed fields with dummy fields, and + * add new fields to AVStreamInternal. ***************************************************************** */ /** - * Stream information used internally by av_find_stream_info() + * Stream information used internally by avformat_find_stream_info() */ #define MAX_STD_TIMEBASES (30*12+30+3+6) struct { @@ -1046,7 +1064,7 @@ typedef struct AVStream { int probe_packets; /** - * Number of frames that have been demuxed during av_find_stream_info() + * Number of frames that have been demuxed during avformat_find_stream_info() */ int codec_info_nb_frames; @@ -1185,6 +1203,12 @@ typedef struct AVStream { */ int inject_global_side_data; + /***************************************************************** + * All fields above this line are not part of the public API. + * Fields below are part of the public API and ABI again. + ***************************************************************** + */ + /** * String containing pairs of key and values describing recommended encoder configuration. * Pairs are separated by ','. 
@@ -1444,8 +1468,12 @@ typedef struct AVFormatContext { #define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload #define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) #define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) -#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. +#if FF_API_LAVF_KEEPSIDE_FLAG +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. Deprecated, will be the default. +#endif #define AVFMT_FLAG_FAST_SEEK 0x80000 ///< Enable fast, but inaccurate seeks for some formats +#define AVFMT_FLAG_SHORTEST 0x100000 ///< Stop muxing when the shortest stream stops. +#define AVFMT_FLAG_AUTO_BSF 0x200000 ///< Wait for packet data before writing a header, and add bitstream filters as requested by the muxer /** * Maximum size of the data read from input for determining @@ -1631,7 +1659,7 @@ typedef struct AVFormatContext { /** * Audio preload in microseconds. * Note, not all formats support this and unpredictable things may happen if it is used when not supported. - * - encoding: Set by user via AVOptions (NO direct access) + * - encoding: Set by user * - decoding: unused */ int audio_preload; @@ -1639,7 +1667,7 @@ typedef struct AVFormatContext { /** * Max chunk time in microseconds. * Note, not all formats support this and unpredictable things may happen if it is used when not supported. - * - encoding: Set by user via AVOptions (NO direct access) + * - encoding: Set by user * - decoding: unused */ int max_chunk_duration; @@ -1647,7 +1675,7 @@ typedef struct AVFormatContext { /** * Max chunk size in bytes * Note, not all formats support this and unpredictable things may happen if it is used when not supported. 
- * - encoding: Set by user via AVOptions (NO direct access) + * - encoding: Set by user * - decoding: unused */ int max_chunk_size; @@ -1656,14 +1684,14 @@ typedef struct AVFormatContext { * forces the use of wallclock timestamps as pts/dts of packets * This has undefined results in the presence of B frames. * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) + * - decoding: Set by user */ int use_wallclock_as_timestamps; /** * avio flags, used to force AVIO_FLAG_DIRECT. * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) + * - decoding: Set by user */ int avio_flags; @@ -1671,34 +1699,34 @@ typedef struct AVFormatContext { * The duration field can be estimated through various ways, and this field can be used * to know how the duration was estimated. * - encoding: unused - * - decoding: Read by user via AVOptions (NO direct access) + * - decoding: Read by user */ enum AVDurationEstimationMethod duration_estimation_method; /** * Skip initial bytes when opening stream * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) + * - decoding: Set by user */ int64_t skip_initial_bytes; /** * Correct single timestamp overflows * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) + * - decoding: Set by user */ unsigned int correct_ts_overflow; /** * Force seeking to any (also non key) frames. * - encoding: unused - * - decoding: Set by user via AVOptions (NO direct access) + * - decoding: Set by user */ int seek2any; /** * Flush the I/O context after each packet. - * - encoding: Set by user via AVOptions (NO direct access) + * - encoding: Set by user * - decoding: unused */ int flush_packets; @@ -1708,14 +1736,14 @@ typedef struct AVFormatContext { * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes * the format. 
* - encoding: unused - * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access) + * - decoding: set by avformat, read by user */ int probe_score; /** * number of bytes to read maximally to identify format. * - encoding: unused - * - decoding: set by user through AVOPtions (NO direct access) + * - decoding: set by user */ int format_probesize; @@ -1723,7 +1751,7 @@ typedef struct AVFormatContext { * ',' separated list of allowed decoders. * If NULL then all are allowed * - encoding: unused - * - decoding: set by user through AVOptions (NO direct access) + * - decoding: set by user */ char *codec_whitelist; @@ -1731,7 +1759,7 @@ typedef struct AVFormatContext { * ',' separated list of allowed demuxers. * If NULL then all are allowed * - encoding: unused - * - decoding: set by user through AVOptions (NO direct access) + * - decoding: set by user */ char *format_whitelist; @@ -1753,7 +1781,7 @@ typedef struct AVFormatContext { * Forced video codec. * This allows forcing a specific decoder, even when there are multiple with * the same codec_id. - * Demuxing: Set by user via av_format_set_video_codec (NO direct access). + * Demuxing: Set by user */ AVCodec *video_codec; @@ -1761,7 +1789,7 @@ typedef struct AVFormatContext { * Forced audio codec. * This allows forcing a specific decoder, even when there are multiple with * the same codec_id. - * Demuxing: Set by user via av_format_set_audio_codec (NO direct access). + * Demuxing: Set by user */ AVCodec *audio_codec; @@ -1769,7 +1797,7 @@ typedef struct AVFormatContext { * Forced subtitle codec. * This allows forcing a specific decoder, even when there are multiple with * the same codec_id. - * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access). + * Demuxing: Set by user */ AVCodec *subtitle_codec; @@ -1777,7 +1805,7 @@ typedef struct AVFormatContext { * Forced data codec. 
* This allows forcing a specific decoder, even when there are multiple with * the same codec_id. - * Demuxing: Set by user via av_format_set_data_codec (NO direct access). + * Demuxing: Set by user */ AVCodec *data_codec; @@ -1801,15 +1829,13 @@ typedef struct AVFormatContext { /** * Output timestamp offset, in microseconds. - * Muxing: set by user via AVOptions (NO direct access) + * Muxing: set by user */ int64_t output_ts_offset; /** * dump format separator. * can be ", " or "\n " or anything else - * Code outside libavformat should access this field using AVOptions - * (NO direct access). * - muxing: Set by user. * - demuxing: Set by user. */ @@ -1846,7 +1872,7 @@ typedef struct AVFormatContext { /** * ',' separated list of allowed protocols. * - encoding: unused - * - decoding: set by user through AVOptions (NO direct access) + * - decoding: set by user */ char *protocol_whitelist; @@ -1881,11 +1907,22 @@ typedef struct AVFormatContext { /** * ',' separated list of disallowed protocols. * - encoding: unused - * - decoding: set by user through AVOptions (NO direct access) + * - decoding: set by user */ char *protocol_blacklist; + + /** + * The maximum number of streams. + * - encoding: unused + * - decoding: set by user + */ + int max_streams; } AVFormatContext; +/** + * Accessors for some AVFormatContext fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. + */ int av_format_get_probe_score(const AVFormatContext *s); AVCodec * av_format_get_video_codec(const AVFormatContext *s); void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); @@ -2033,6 +2070,21 @@ const AVClass *avformat_get_class(void); */ AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); +/** + * Wrap an existing array as stream side data. + * + * @param st stream + * @param type side information type + * @param data the side data array. It must be allocated with the av_malloc() + * family of functions. 
The ownership of the data is transferred to + * st. + * @param size side information size + * @return zero on success, a negative AVERROR code on failure. On failure, + * the stream is unchanged and the data remains owned by the caller. + */ +int av_stream_add_side_data(AVStream *st, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + /** * Allocate new information from stream. * @@ -2051,8 +2103,13 @@ uint8_t *av_stream_new_side_data(AVStream *stream, * @param size pointer for side information size to store (optional) * @return pointer to data if present or NULL otherwise */ +#if FF_API_NOCONST_GET_SIDE_DATA uint8_t *av_stream_get_side_data(AVStream *stream, enum AVPacketSideDataType type, int *size); +#else +uint8_t *av_stream_get_side_data(const AVStream *stream, + enum AVPacketSideDataType type, int *size); +#endif AVProgram *av_new_program(AVFormatContext *s, int id); @@ -2362,6 +2419,10 @@ void avformat_close_input(AVFormatContext **s); * @addtogroup lavf_encoding * @{ */ + +#define AVSTREAM_INIT_IN_WRITE_HEADER 0 ///< stream parameters initialized in avformat_write_header +#define AVSTREAM_INIT_IN_INIT_OUTPUT 1 ///< stream parameters initialized in avformat_init_output + /** * Allocate the stream private data and write the stream header to * an output media file. @@ -2373,13 +2434,37 @@ void avformat_close_input(AVFormatContext **s); * On return this parameter will be destroyed and replaced with a dict containing * options that were not found. May be NULL. * - * @return 0 on success, negative AVERROR on failure. + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec had not already been fully initialized in avformat_init, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec had already been fully initialized in avformat_init, + * negative AVERROR on failure. * - * @see av_opt_find, av_dict_set, avio_open, av_oformat_next. + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next, avformat_init_output. 
*/ av_warn_unused_result int avformat_write_header(AVFormatContext *s, AVDictionary **options); +/** + * Allocate the stream private data and initialize the codec, but do not write the header. + * May optionally be used before avformat_write_header to initialize stream parameters + * before actually writing the header. + * If using this function, do not pass the same options to avformat_write_header. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return AVSTREAM_INIT_IN_WRITE_HEADER on success if the codec requires avformat_write_header to fully initialize, + * AVSTREAM_INIT_IN_INIT_OUTPUT on success if the codec has been fully initialized, + * negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next, avformat_write_header. + */ +av_warn_unused_result +int avformat_init_output(AVFormatContext *s, AVDictionary **options); + /** * Write a packet to an output media file. * @@ -2718,6 +2803,9 @@ void av_dump_format(AVFormatContext *ic, const char *url, int is_output); + +#define AV_FRAME_FILENAME_FLAGS_MULTIPLE 1 ///< Allow multiple %d + /** * Return in 'buf' the path with '%d' replaced by a number. 
* @@ -2728,8 +2816,12 @@ void av_dump_format(AVFormatContext *ic, * @param buf_size destination buffer size * @param path numbered sequence string * @param number frame number + * @param flags AV_FRAME_FILENAME_FLAGS_* * @return 0 if OK, -1 on format error */ +int av_get_frame_filename2(char *buf, int buf_size, + const char *path, int number, int flags); + int av_get_frame_filename(char *buf, int buf_size, const char *path, int number); @@ -2862,6 +2954,7 @@ int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, int avformat_queue_attached_pictures(AVFormatContext *s); +#if FF_API_OLD_BSF /** * Apply a list of bitstream filters to a packet. * @@ -2873,12 +2966,41 @@ int avformat_queue_attached_pictures(AVFormatContext *s); * @return >=0 on success; * AVERROR code on failure */ -#if FF_API_OLD_BSF attribute_deprecated int av_apply_bitstream_filters(AVCodecContext *codec, AVPacket *pkt, AVBitStreamFilterContext *bsfc); #endif +enum AVTimebaseSource { + AVFMT_TBCF_AUTO = -1, + AVFMT_TBCF_DECODER, + AVFMT_TBCF_DEMUXER, +#if FF_API_R_FRAME_RATE + AVFMT_TBCF_R_FRAMERATE, +#endif +}; + +/** + * Transfer internal timing information from one stream to another. + * + * This function is useful when doing stream copy. + * + * @param ofmt target output format for ost + * @param ost output stream which needs timings copy and adjustments + * @param ist reference input stream to copy timings from + * @param copy_tb define from where the stream codec timebase needs to be imported + */ +int avformat_transfer_internal_stream_timing_info(const AVOutputFormat *ofmt, + AVStream *ost, const AVStream *ist, + enum AVTimebaseSource copy_tb); + +/** + * Get the internal codec timebase from a stream. 
+ * + * @param st input stream to extract the timebase from + */ +AVRational av_stream_get_codec_timebase(const AVStream *st); + /** * @} */ diff --git a/third-party/FFmpeg-iOS/include/libavformat/avio.h b/third-party/FFmpeg-iOS/include/libavformat/avio.h index 889e0a9abc..b4eeffcb50 100644 --- a/third-party/FFmpeg-iOS/include/libavformat/avio.h +++ b/third-party/FFmpeg-iOS/include/libavformat/avio.h @@ -34,7 +34,15 @@ #include "../libavformat/version.h" -#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */ +/** + * Seeking works like for a local file. + */ +#define AVIO_SEEKABLE_NORMAL (1 << 0) + +/** + * Seeking by timestamp with avio_seek_time() is possible. + */ +#define AVIO_SEEKABLE_TIME (1 << 1) /** * Callback for checking whether to abort blocking functions. @@ -129,7 +137,13 @@ enum AVIODataMarkerType { * Trailer data, which doesn't contain actual content, but only for * finalizing the output file. */ - AVIO_DATA_MARKER_TRAILER + AVIO_DATA_MARKER_TRAILER, + /** + * A point in the output bytestream where the underlying AVIOContext might + * flush the buffer depending on latency or buffering requirements. Typically + * means the end of a packet. 
+ */ + AVIO_DATA_MARKER_FLUSH_POINT, }; /** @@ -160,8 +174,9 @@ typedef struct AVIOContext { const AVClass *av_class; /* - * The following shows the relationship between buffer, buf_ptr, buf_end, buf_size, - * and pos, when reading and when writing (since AVIOContext is used for both): + * The following shows the relationship between buffer, buf_ptr, + * buf_ptr_max, buf_end, buf_size, and pos, when reading and when writing + * (since AVIOContext is used for both): * ********************************************************************************** * READING @@ -188,21 +203,24 @@ typedef struct AVIOContext { * WRITING ********************************************************************************** * - * | buffer_size | - * |-------------------------------| - * | | + * | buffer_size | + * |--------------------------------------| + * | | * - * buffer buf_ptr buf_end - * +-------------------+-----------+ - * |/ / / / / / / / / /| | - * write buffer: | / to be flushed / | | - * |/ / / / / / / / / /| | - * +-------------------+-----------+ + * buf_ptr_max + * buffer (buf_ptr) buf_end + * +-----------------------+--------------+ + * |/ / / / / / / / / / / /| | + * write buffer: | / / to be flushed / / | | + * |/ / / / / / / / / / / /| | + * +-----------------------+--------------+ + * buf_ptr can be in this + * due to a backward seek * - * pos - * +--------------------------+-----------------------------------+ - * output file: | | | - * +--------------------------+-----------------------------------+ + * pos + * +-------------+----------------------------------------------+ + * output file: | | | + * +-------------+----------------------------------------------+ * */ unsigned char *buffer; /**< Start of the buffer. 
*/ @@ -218,7 +236,7 @@ typedef struct AVIOContext { int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); int64_t (*seek)(void *opaque, int64_t offset, int whence); int64_t pos; /**< position in the file of the current buffer */ - int must_flush; /**< true if the next seek should flush */ + int must_flush; /**< unused */ int eof_reached; /**< true if eof reached */ int write_flag; /**< true if open for writing */ int max_packet_size; @@ -313,6 +331,25 @@ typedef struct AVIOContext { */ enum AVIODataMarkerType current_type; int64_t last_time; + + /** + * A callback that is used instead of short_seek_threshold. + * This is current internal only, do not use from outside. + */ + int (*short_seek_get)(void *opaque); + + int64_t written; + + /** + * Maximum reached position before a backward seek in the write buffer, + * used keeping track of already written data for a later flush. + */ + unsigned char *buf_ptr_max; + + /** + * Try to buffer at least this amount of data before flushing it + */ + int min_packet_size; } AVIOContext; /** @@ -402,7 +439,7 @@ void avio_free_directory_entry(AVIODirEntry **entry); /** * Allocate and initialize an AVIOContext for buffered I/O. It must be later - * freed with av_free(). + * freed with avio_context_free(). * * @param buffer Memory block for input/output operations via AVIOContext. * The buffer must be allocated with av_malloc() and friends. @@ -430,6 +467,14 @@ AVIOContext *avio_alloc_context( int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), int64_t (*seek)(void *opaque, int64_t offset, int whence)); +/** + * Free the supplied IO context and everything associated with it. + * + * @param s Double pointer to the IO context. This function will write NULL + * into s. 
+ */ +void avio_context_free(AVIOContext **s); + void avio_w8(AVIOContext *s, int b); void avio_write(AVIOContext *s, const unsigned char *buf, int size); void avio_wl64(AVIOContext *s, uint64_t val); @@ -553,6 +598,15 @@ void avio_flush(AVIOContext *s); */ int avio_read(AVIOContext *s, unsigned char *buf, int size); +/** + * Read size bytes from AVIOContext into buf. Unlike avio_read(), this is allowed + * to read fewer bytes than requested. The missing bytes can be read in the next + * call. This always tries to read at least 1 byte. + * Useful to reduce latency in certain cases. + * @return number of bytes read or AVERROR + */ +int avio_read_partial(AVIOContext *s, unsigned char *buf, int size); + /** * @name Functions for reading from AVIOContext * @{ @@ -703,6 +757,18 @@ int avio_closep(AVIOContext **s); */ int avio_open_dyn_buf(AVIOContext **s); +/** + * Return the written size and a pointer to the buffer. + * The AVIOContext stream is left intact. + * The buffer must NOT be freed. + * No padding is added to the buffer. + * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_get_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + /** * Return the written size and a pointer to the buffer. The buffer * must be freed with av_free(). 
diff --git a/third-party/FFmpeg-iOS/include/libavformat/version.h b/third-party/FFmpeg-iOS/include/libavformat/version.h index 645ce04720..c2f33f84ae 100644 --- a/third-party/FFmpeg-iOS/include/libavformat/version.h +++ b/third-party/FFmpeg-iOS/include/libavformat/version.h @@ -30,9 +30,9 @@ #include "../libavutil/version.h" // Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) -// Also please add any ticket numbers that you belive might be affected here +// Also please add any ticket numbers that you believe might be affected here #define LIBAVFORMAT_VERSION_MAJOR 57 -#define LIBAVFORMAT_VERSION_MINOR 41 +#define LIBAVFORMAT_VERSION_MINOR 83 #define LIBAVFORMAT_VERSION_MICRO 100 #define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ @@ -79,6 +79,25 @@ #ifndef FF_API_LAVF_AVCTX #define FF_API_LAVF_AVCTX (LIBAVFORMAT_VERSION_MAJOR < 58) #endif +#ifndef FF_API_NOCONST_GET_SIDE_DATA +#define FF_API_NOCONST_GET_SIDE_DATA (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_HTTP_USER_AGENT +#define FF_API_HTTP_USER_AGENT (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_HLS_WRAP +#define FF_API_HLS_WRAP (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_MERGE_SD +#define FF_API_LAVF_MERGE_SD (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_KEEPSIDE_FLAG +#define FF_API_LAVF_KEEPSIDE_FLAG (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_OLD_ROTATE_API +#define FF_API_OLD_ROTATE_API (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif + #ifndef FF_API_R_FRAME_RATE #define FF_API_R_FRAME_RATE 1 diff --git a/third-party/FFmpeg-iOS/include/libavutil/adler32.h b/third-party/FFmpeg-iOS/include/libavutil/adler32.h index 0dc69ec0a8..a1f035b734 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/adler32.h +++ b/third-party/FFmpeg-iOS/include/libavutil/adler32.h @@ -18,6 +18,12 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_adler32 
+ * Public header for Adler-32 hash function implementation. + */ + #ifndef AVUTIL_ADLER32_H #define AVUTIL_ADLER32_H @@ -25,11 +31,10 @@ #include "attributes.h" /** - * @file - * Public header for libavutil Adler32 hasher + * @defgroup lavu_adler32 Adler-32 + * @ingroup lavu_hash + * Adler-32 hash function implementation. * - * @defgroup lavu_adler32 Adler32 - * @ingroup lavu_crypto * @{ */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/attributes.h b/third-party/FFmpeg-iOS/include/libavutil/attributes.h index 5c6b9deecb..54d1901116 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/attributes.h +++ b/third-party/FFmpeg-iOS/include/libavutil/attributes.h @@ -121,8 +121,7 @@ #endif #endif - -#if defined(__GNUC__) +#if defined(__GNUC__) || defined(__clang__) # define av_unused __attribute__((unused)) #else # define av_unused @@ -133,7 +132,7 @@ * away. This is useful for variables accessed only from inline * assembler without the compiler being aware. */ -#if AV_GCC_VERSION_AT_LEAST(3,1) +#if AV_GCC_VERSION_AT_LEAST(3,1) || defined(__clang__) # define av_used __attribute__((used)) #else # define av_used diff --git a/third-party/FFmpeg-iOS/include/libavutil/avassert.h b/third-party/FFmpeg-iOS/include/libavutil/avassert.h index f473637649..46f3fea580 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/avassert.h +++ b/third-party/FFmpeg-iOS/include/libavutil/avassert.h @@ -59,8 +59,17 @@ */ #if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 #define av_assert2(cond) av_assert0(cond) +#define av_assert2_fpu() av_assert0_fpu() #else #define av_assert2(cond) ((void)0) +#define av_assert2_fpu() ((void)0) #endif +/** + * Assert that floating point opperations can be executed. 
+ * + * This will av_assert0() that the cpu is not in MMX state on X86 + */ +void av_assert0_fpu(void); + #endif /* AVUTIL_AVASSERT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/avconfig.h b/third-party/FFmpeg-iOS/include/libavutil/avconfig.h index 36a8cd14da..f10aa6186b 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/avconfig.h +++ b/third-party/FFmpeg-iOS/include/libavutil/avconfig.h @@ -3,5 +3,4 @@ #define AVUTIL_AVCONFIG_H #define AV_HAVE_BIGENDIAN 0 #define AV_HAVE_FAST_UNALIGNED 1 -#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0 #endif /* AVUTIL_AVCONFIG_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/avstring.h b/third-party/FFmpeg-iOS/include/libavutil/avstring.h index dd2876990f..04d2695640 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/avstring.h +++ b/third-party/FFmpeg-iOS/include/libavutil/avstring.h @@ -266,6 +266,11 @@ int av_strcasecmp(const char *a, const char *b); */ int av_strncasecmp(const char *a, const char *b, size_t n); +/** + * Locale-independent strings replace. + * @note This means only ASCII-range characters are replace + */ +char *av_strireplace(const char *str, const char *from, const char *to); /** * Thread safe basename. diff --git a/third-party/FFmpeg-iOS/include/libavutil/avutil.h b/third-party/FFmpeg-iOS/include/libavutil/avutil.h index c1f76c16ea..4d633156d1 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/avutil.h +++ b/third-party/FFmpeg-iOS/include/libavutil/avutil.h @@ -23,7 +23,8 @@ /** * @file - * external API header + * @ingroup lavu + * Convenience header that includes @ref lavu "libavutil"'s core. */ /** @@ -78,14 +79,15 @@ */ /** - * @defgroup lavu Common utility functions + * @defgroup lavu libavutil + * Common code shared across all FFmpeg libraries. * - * @brief - * libavutil contains the code shared across all the other FFmpeg - * libraries - * - * @note In order to use the functions provided by avutil you must include - * the specific header. 
+ * @note + * libavutil is designed to be modular. In most cases, in order to use the + * functions provided by one component of libavutil you must explicitly include + * the specific header containing that feature. If you are only using + * media-related components, you could simply include libavutil/avutil.h, which + * brings in most of the "core" components. * * @{ * @@ -94,7 +96,7 @@ * @{ * @} * - * @defgroup lavu_math Maths + * @defgroup lavu_math Mathematics * @{ * * @} @@ -116,6 +118,12 @@ * * @} * + * @defgroup lavu_video Video related + * + * @{ + * + * @} + * * @defgroup lavu_audio Audio related * * @{ @@ -335,6 +343,20 @@ FILE *av_fopen_utf8(const char *path, const char *mode); */ AVRational av_get_time_base_q(void); +#define AV_FOURCC_MAX_STRING_SIZE 32 + +#define av_fourcc2str(fourcc) av_fourcc_make_string((char[AV_FOURCC_MAX_STRING_SIZE]){0}, fourcc) + +/** + * Fill the provided buffer with a string containing a FourCC (four-character + * code) representation. + * + * @param buf a buffer with size in bytes of at least AV_FOURCC_MAX_STRING_SIZE + * @param fourcc the fourcc to represent + * @return the buffer in input + */ +char *av_fourcc_make_string(char *buf, uint32_t fourcc); + /** * @} * @} diff --git a/third-party/FFmpeg-iOS/include/libavutil/bswap.h b/third-party/FFmpeg-iOS/include/libavutil/bswap.h index a21a03483f..af2dd7fa1a 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/bswap.h +++ b/third-party/FFmpeg-iOS/include/libavutil/bswap.h @@ -27,7 +27,7 @@ #define AVUTIL_BSWAP_H #include -#include "../libavutilavconfig.h" +#include "../libavutil/avconfig.h" #include "attributes.h" #ifdef HAVE_AV_CONFIG_H diff --git a/third-party/FFmpeg-iOS/include/libavutil/buffer.h b/third-party/FFmpeg-iOS/include/libavutil/buffer.h index 0c0ce12cf2..73b6bd0b14 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/buffer.h +++ b/third-party/FFmpeg-iOS/include/libavutil/buffer.h @@ -256,9 +256,10 @@ AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* 
(*alloc)(int size)); * @param alloc a function that will be used to allocate new buffers when the * pool is empty. * @param pool_free a function that will be called immediately before the pool - * is freed. I.e. after av_buffer_pool_can_uninit() is called - * by the pool and all the frames are returned to the pool and - * freed. It is intended to uninitialize the user opaque data. + * is freed. I.e. after av_buffer_pool_uninit() is called + * by the caller and all the frames are returned to the pool + * and freed. It is intended to uninitialize the user opaque + * data. * @return newly created buffer pool on success, NULL on error. */ AVBufferPool *av_buffer_pool_init2(int size, void *opaque, diff --git a/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h b/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h index ec7effead1..50bb8f03c5 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h +++ b/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h @@ -131,21 +131,30 @@ enum AVMatrixEncoding { * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); - * - a number of channels, in decimal, optionally followed by 'c', yielding + * - a number of channels, in decimal, followed by 'c', yielding * the default channel layout for that number of channels (@see * av_get_default_channel_layout); * - a channel layout mask, in hexadecimal starting with "0x" (see the * AV_CH_* macros). * - * @warning Starting from the next major bump the trailing character - * 'c' to specify a number of channels will be required, while a - * channel layout mask could also be specified as a decimal number - * (if and only if not followed by "c"). - * * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" */ uint64_t av_get_channel_layout(const char *name); +/** + * Return a channel layout and the number of channels based on the specified name. 
+ * + * This function is similar to (@see av_get_channel_layout), but can also parse + * unknown channel layout specifications. + * + * @param[in] name channel layout specification string + * @param[out] channel_layout parsed channel layout (0 if unknown) + * @param[out] nb_channels number of channels + * + * @return 0 on success, AVERROR(EINVAL) if the parsing fails. + */ +int av_get_extended_channel_layout(const char *name, uint64_t* channel_layout, int* nb_channels); + /** * Return a description of a channel layout. * If nb_channels is <= 0, it is guessed from the channel_layout. diff --git a/third-party/FFmpeg-iOS/include/libavutil/cpu.h b/third-party/FFmpeg-iOS/include/libavutil/cpu.h index 4bff16714a..9e5d40affe 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/cpu.h +++ b/third-party/FFmpeg-iOS/include/libavutil/cpu.h @@ -21,6 +21,8 @@ #ifndef AVUTIL_CPU_H #define AVUTIL_CPU_H +#include + #include "attributes.h" #define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ @@ -39,6 +41,7 @@ #define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster ///< than regular MMX/SSE (e.g. Core1) #define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_SSSE3SLOW 0x4000000 ///< SSSE3 supported, but usually not faster #define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower #define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions #define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions @@ -85,8 +88,6 @@ void av_force_cpu_flags(int flags); * Set a mask on flags returned by av_get_cpu_flags(). * This function is mainly useful for testing. * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible - * - * @warning this function is not thread safe. 
*/ attribute_deprecated void av_set_cpu_flags_mask(int mask); @@ -114,4 +115,15 @@ int av_parse_cpu_caps(unsigned *flags, const char *s); */ int av_cpu_count(void); +/** + * Get the maximum data alignment that may be required by FFmpeg. + * + * Note that this is affected by the build configuration and the CPU flags mask, + * so e.g. if the CPU supports AVX, but libavutil has been built with + * --disable-avx or the AV_CPU_FLAG_AVX flag has been disabled through + * av_set_cpu_flags_mask(), then this function will behave as if AVX is not + * present. + */ +size_t av_cpu_max_align(void); + #endif /* AVUTIL_CPU_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/crc.h b/third-party/FFmpeg-iOS/include/libavutil/crc.h index ef8a7137e4..2a1b0d7624 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/crc.h +++ b/third-party/FFmpeg-iOS/include/libavutil/crc.h @@ -18,6 +18,12 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_crc32 + * Public header for CRC hash function implementation. + */ + #ifndef AVUTIL_CRC_H #define AVUTIL_CRC_H @@ -27,8 +33,14 @@ #include "version.h" /** - * @defgroup lavu_crc32 CRC32 - * @ingroup lavu_crypto + * @defgroup lavu_crc32 CRC + * @ingroup lavu_hash + * CRC (Cyclic Redundancy Check) hash function implementation. + * + * This module supports numerous CRC polynomials, in addition to the most + * widely used CRC-32-IEEE. See @ref AVCRCId for a list of available + * polynomials. 
+ * * @{ */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/display.h b/third-party/FFmpeg-iOS/include/libavutil/display.h index 39c15ee6b8..515adad795 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/display.h +++ b/third-party/FFmpeg-iOS/include/libavutil/display.h @@ -18,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * Display matrix + */ + #ifndef AVUTIL_DISPLAY_H #define AVUTIL_DISPLAY_H @@ -25,15 +30,26 @@ #include "common.h" /** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_display Display transformation matrix functions + * @{ + */ + +/** + * @addtogroup lavu_video_display * The display transformation matrix specifies an affine transformation that * should be applied to video frames for correct presentation. It is compatible * with the matrices stored in the ISO/IEC 14496-12 container format. * * The data is a 3x3 matrix represented as a 9-element array: * + * @code{.unparsed} * | a b u | * (a, b, u, c, d, v, x, y, w) -> | c d v | * | x y w | + * @endcode * * All numbers are stored in native endianness, as 16.16 fixed-point values, * except for u, v and w, which are stored as 2.30 fixed-point values. @@ -41,15 +57,21 @@ * The transformation maps a point (p, q) in the source (pre-transformation) * frame to the point (p', q') in the destination (post-transformation) frame as * follows: + * + * @code{.unparsed} * | a b u | * (p, q, 1) . 
| c d v | = z * (p', q', 1) * | x y w | + * @endcode * * The transformation can also be more explicitly written in components as * follows: + * + * @code{.unparsed} * p' = (a * p + c * q + x) / z; * q' = (b * p + d * q + y) / z; * z = u * p + v * q + w + * @endcode */ /** @@ -84,4 +106,9 @@ void av_display_rotation_set(int32_t matrix[9], double angle); */ void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip); +/** + * @} + * @} + */ + #endif /* AVUTIL_DISPLAY_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/ffversion.h b/third-party/FFmpeg-iOS/include/libavutil/ffversion.h index 1c6356a94f..78d9a7ed41 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/ffversion.h +++ b/third-party/FFmpeg-iOS/include/libavutil/ffversion.h @@ -1,5 +1,5 @@ /* Automatically generated by version.sh, do not manually edit! */ #ifndef AVUTIL_FFVERSION_H #define AVUTIL_FFVERSION_H -#define FFMPEG_VERSION "3.1.1" +#define FFMPEG_VERSION "3.4" #endif /* AVUTIL_FFVERSION_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/frame.h b/third-party/FFmpeg-iOS/include/libavutil/frame.h index 2b5c3320c3..abe4f4fd17 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/frame.h +++ b/third-party/FFmpeg-iOS/include/libavutil/frame.h @@ -25,6 +25,7 @@ #ifndef AVUTIL_FRAME_H #define AVUTIL_FRAME_H +#include #include #include "avutil.h" @@ -120,7 +121,26 @@ enum AVFrameSideDataType { * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer. * This is set on the first frame of a GOP that has a temporal reference of 0. */ - AV_FRAME_DATA_GOP_TIMECODE + AV_FRAME_DATA_GOP_TIMECODE, + + /** + * The data represents the AVSphericalMapping structure defined in + * libavutil/spherical.h. + */ + AV_FRAME_DATA_SPHERICAL, + + /** + * Content light level (based on CTA-861.3). This payload contains data in + * the form of the AVContentLightMetadata struct. 
+ */ + AV_FRAME_DATA_CONTENT_LIGHT_LEVEL, + + /** + * The data contains an ICC profile as an opaque octet buffer following the + * format described by ISO 15076-1 with an optional name defined in the + * metadata key entry "name". + */ + AV_FRAME_DATA_ICC_PROFILE, }; enum AVActiveFormatDescription { @@ -173,12 +193,9 @@ typedef struct AVFrameSideData { * * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added * to the end with a minor bump. - * Similarly fields that are marked as to be only accessed by - * av_opt_ptr() can be reordered. This allows 2 forks to add fields - * without breaking compatibility with each other. * * Fields can be accessed through AVOptions, the name string used, matches the - * C structure field name for fields accessable through AVOptions. The AVClass + * C structure field name for fields accessible through AVOptions. The AVClass * for AVFrame can be obtained from avcodec_get_frame_class() */ typedef struct AVFrame { @@ -231,9 +248,18 @@ typedef struct AVFrame { uint8_t **extended_data; /** - * width and height of the video frame + * @name Video dimensions + * Video frames only. The coded dimensions (in pixels) of the video frame, + * i.e. the size of the rectangle that contains some well-defined values. + * + * @note The part of the frame intended for display/presentation is further + * restricted by the @ref cropping "Cropping rectangle". + * @{ */ int width, height; + /** + * @} + */ /** * number of audio samples (per channel) described by this frame @@ -267,10 +293,14 @@ typedef struct AVFrame { */ int64_t pts; +#if FF_API_PKT_PTS /** * PTS copied from the AVPacket that was decoded to produce this frame. + * @deprecated use the pts field instead */ + attribute_deprecated int64_t pkt_pts; +#endif /** * DTS copied from the AVPacket that triggered returning this frame. 
(if frame threading isn't used) @@ -385,6 +415,7 @@ typedef struct AVFrame { /** * @defgroup lavu_frame_flags AV_FRAME_FLAGS + * @ingroup lavu_frame * Flags describing additional frame properties. * * @{ @@ -394,6 +425,10 @@ typedef struct AVFrame { * The frame data may be corrupted, e.g. due to decoding errors. */ #define AV_FRAME_FLAG_CORRUPT (1 << 0) +/** + * A flag to mark the frames which need to be decoded, but shouldn't be output. + */ +#define AV_FRAME_FLAG_DISCARD (1 << 2) /** * @} */ @@ -405,8 +440,6 @@ typedef struct AVFrame { /** * MPEG vs JPEG YUV range. - * It must be accessed using av_frame_get_color_range() and - * av_frame_set_color_range(). * - encoding: Set by user * - decoding: Set by libavcodec */ @@ -418,8 +451,6 @@ typedef struct AVFrame { /** * YUV colorspace type. - * It must be accessed using av_frame_get_colorspace() and - * av_frame_set_colorspace(). * - encoding: Set by user * - decoding: Set by libavcodec */ @@ -429,8 +460,6 @@ typedef struct AVFrame { /** * frame timestamp estimated using various heuristics, in stream time base - * Code outside libavutil should access this field using: - * av_frame_get_best_effort_timestamp(frame) * - encoding: unused * - decoding: set by libavcodec, read by user. */ @@ -438,8 +467,6 @@ typedef struct AVFrame { /** * reordered pos from the last AVPacket that has been input into the decoder - * Code outside libavutil should access this field using: - * av_frame_get_pkt_pos(frame) * - encoding: unused * - decoding: Read by user. */ @@ -448,8 +475,6 @@ typedef struct AVFrame { /** * duration of the corresponding packet, expressed in * AVStream->time_base units, 0 if unknown. - * Code outside libavutil should access this field using: - * av_frame_get_pkt_duration(frame) * - encoding: unused * - decoding: Read by user. */ @@ -457,8 +482,6 @@ typedef struct AVFrame { /** * metadata. - * Code outside libavutil should access this field using: - * av_frame_get_metadata(frame) * - encoding: Set by user. 
* - decoding: Set by libavcodec. */ @@ -468,8 +491,6 @@ typedef struct AVFrame { * decode error flags of the frame, set to a combination of * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there * were errors during the decoding. - * Code outside libavutil should access this field using: - * av_frame_get_decode_error_flags(frame) * - encoding: unused * - decoding: set by libavcodec, read by user. */ @@ -479,8 +500,6 @@ typedef struct AVFrame { /** * number of audio channels, only used for audio. - * Code outside libavutil should access this field using: - * av_frame_get_channels(frame) * - encoding: unused * - decoding: Read by user. */ @@ -488,8 +507,7 @@ typedef struct AVFrame { /** * size of the corresponding packet containing the compressed - * frame. It must be accessed using av_frame_get_pkt_size() and - * av_frame_set_pkt_size(). + * frame. * It is set to a negative value if unknown. * - encoding: unused * - decoding: set by libavcodec, read by user. @@ -499,13 +517,11 @@ typedef struct AVFrame { #if FF_API_FRAME_QP /** * QP table - * Not to be accessed directly from outside libavutil */ attribute_deprecated int8_t *qscale_table; /** * QP store stride - * Not to be accessed directly from outside libavutil */ attribute_deprecated int qstride; @@ -513,9 +529,6 @@ typedef struct AVFrame { attribute_deprecated int qscale_type; - /** - * Not to be accessed directly from outside libavutil - */ AVBufferRef *qp_table_buf; #endif /** @@ -523,12 +536,38 @@ typedef struct AVFrame { * AVHWFramesContext describing the frame. */ AVBufferRef *hw_frames_ctx; + + /** + * AVBufferRef for free use by the API user. FFmpeg will never check the + * contents of the buffer ref. FFmpeg calls av_buffer_unref() on it when + * the frame is unreferenced. av_frame_copy_props() calls create a new + * reference with av_buffer_ref() for the target frame's opaque_ref field. + * + * This is unrelated to the opaque field, although it serves a similar + * purpose. 
+ */ + AVBufferRef *opaque_ref; + + /** + * @anchor cropping + * @name Cropping + * Video frames only. The number of pixels to discard from the the + * top/bottom/left/right border of the frame to obtain the sub-rectangle of + * the frame intended for presentation. + * @{ + */ + size_t crop_top; + size_t crop_bottom; + size_t crop_left; + size_t crop_right; + /** + * @} + */ } AVFrame; /** - * Accessors for some AVFrame fields. - * The position of these field in the structure is not part of the ABI, - * they should not be accessed directly outside libavutil. + * Accessors for some AVFrame fields. These used to be provided for ABI + * compatibility, and do not need to be used anymore. */ int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); @@ -642,7 +681,9 @@ void av_frame_move_ref(AVFrame *dst, AVFrame *src); * cases. * * @param frame frame in which to store the new buffers. - * @param align required buffer size alignment + * @param align Required buffer size alignment. If equal to 0, alignment will be + * chosen automatically for the current CPU. It is highly + * recommended to pass 0 here unless you know what you are doing. * * @return 0 on success, a negative AVERROR on error. */ @@ -734,6 +775,40 @@ AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, */ void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type); + +/** + * Flags for frame cropping. + */ +enum { + /** + * Apply the maximum possible cropping, even if it requires setting the + * AVFrame.data[] entries to unaligned pointers. Passing unaligned data + * to FFmpeg API is generally not allowed, and causes undefined behavior + * (such as crashes). You can pass unaligned data only to FFmpeg APIs that + * are explicitly documented to accept it. Use this flag only if you + * absolutely know what you are doing. 
+ */ + AV_FRAME_CROP_UNALIGNED = 1 << 0, +}; + +/** + * Crop the given video AVFrame according to its crop_left/crop_top/crop_right/ + * crop_bottom fields. If cropping is successful, the function will adjust the + * data pointers and the width/height fields, and set the crop fields to 0. + * + * In all cases, the cropping boundaries will be rounded to the inherent + * alignment of the pixel format. In some cases, such as for opaque hwaccel + * formats, the left/top cropping is ignored. The crop fields are set to 0 even + * if the cropping was rounded or ignored. + * + * @param frame the frame which should be cropped + * @param flags Some combination of AV_FRAME_CROP_* flags, or 0. + * + * @return >= 0 on success, a negative AVERROR on error. If the cropping fields + * were invalid, AVERROR(ERANGE) is returned, and nothing is changed. + */ +int av_frame_apply_cropping(AVFrame *frame, int flags); + /** * @return a string identifying the side data type */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hash.h b/third-party/FFmpeg-iOS/include/libavutil/hash.h index d4bcbf8cc8..a20b8934f1 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/hash.h +++ b/third-party/FFmpeg-iOS/include/libavutil/hash.h @@ -18,18 +18,108 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_hash_generic + * Generic hashing API + */ + #ifndef AVUTIL_HASH_H #define AVUTIL_HASH_H #include +/** + * @defgroup lavu_hash Hash Functions + * @ingroup lavu_crypto + * Hash functions useful in multimedia. + * + * Hash functions are widely used in multimedia, from error checking and + * concealment to internal regression testing. libavutil has efficient + * implementations of a variety of hash functions that may be useful for + * FFmpeg and other multimedia applications. + * + * @{ + * + * @defgroup lavu_hash_generic Generic Hashing API + * An abstraction layer for all hash functions supported by libavutil. 
+ * + * If your application needs to support a wide range of different hash + * functions, then the Generic Hashing API is for you. It provides a generic, + * reusable API for @ref lavu_hash "all hash functions" implemented in libavutil. + * If you just need to use one particular hash function, use the @ref lavu_hash + * "individual hash" directly. + * + * @section Sample Code + * + * A basic template for using the Generic Hashing API follows: + * + * @code + * struct AVHashContext *ctx = NULL; + * const char *hash_name = NULL; + * uint8_t *output_buf = NULL; + * + * // Select from a string returned by av_hash_names() + * hash_name = ...; + * + * // Allocate a hash context + * ret = av_hash_alloc(&ctx, hash_name); + * if (ret < 0) + * return ret; + * + * // Initialize the hash context + * av_hash_init(ctx); + * + * // Update the hash context with data + * while (data_left) { + * av_hash_update(ctx, data, size); + * } + * + * // Now we have no more data, so it is time to finalize the hash and get the + * // output. But we need to first allocate an output buffer. Note that you can + * // use any memory allocation function, including malloc(), not just + * // av_malloc(). + * output_buf = av_malloc(av_hash_get_size(ctx)); + * if (!output_buf) + * return AVERROR(ENOMEM); + * + * // Finalize the hash context. + * // You can use any of the av_hash_final*() functions provided, for other + * // output formats. If you do so, be sure to adjust the memory allocation + * // above. See the function documentation below for the exact amount of extra + * // memory needed. + * av_hash_final(ctx, output_buffer); + * + * // Free the context + * av_hash_freep(&ctx); + * @endcode + * + * @section Hash Function-Specific Information + * If the CRC32 hash is selected, the #AV_CRC_32_IEEE polynomial will be + * used. + * + * If the Murmur3 hash is selected, the default seed will be used. See @ref + * lavu_murmur3_seedinfo "Murmur3" for more information. 
+ * + * @{ + */ + +/** + * @example ffhash.c + * This example is a simple command line application that takes one or more + * arguments. It demonstrates a typical use of the hashing API with allocation, + * initialization, updating, and finalizing. + */ + struct AVHashContext; /** * Allocate a hash context for the algorithm specified by name. * * @return >= 0 for success, a negative error code for failure - * @note The context is not initialized, you must call av_hash_init(). + * + * @note The context is not initialized after a call to this function; you must + * call av_hash_init() to do so. */ int av_hash_alloc(struct AVHashContext **ctx, const char *name); @@ -38,8 +128,8 @@ int av_hash_alloc(struct AVHashContext **ctx, const char *name); * * This function can be used to enumerate the algorithms. * - * @param i index of the hash algorithm, starting from 0 - * @return a pointer to a static string or NULL if i is out of range + * @param[in] i Index of the hash algorithm, starting from 0 + * @return Pointer to a static string or `NULL` if `i` is out of range */ const char *av_hash_names(int i); @@ -49,64 +139,125 @@ const char *av_hash_names(int i); const char *av_hash_get_name(const struct AVHashContext *ctx); /** - * Maximum value that av_hash_get_size will currently return. + * Maximum value that av_hash_get_size() will currently return. * - * You can use this if you absolutely want or need to use static allocation - * and are fine with not supporting hashes newly added to libavutil without - * recompilation. - * Note that you still need to check against av_hash_get_size, adding new hashes - * with larger sizes will not be considered an ABI change and should not cause - * your code to overflow a buffer. + * You can use this if you absolutely want or need to use static allocation for + * the output buffer and are fine with not supporting hashes newly added to + * libavutil without recompilation. 
+ * + * @warning + * Adding new hashes with larger sizes, and increasing the macro while doing + * so, will not be considered an ABI change. To prevent your code from + * overflowing a buffer, either dynamically allocate the output buffer with + * av_hash_get_size(), or limit your use of the Hashing API to hashes that are + * already in FFmpeg during the time of compilation. */ #define AV_HASH_MAX_SIZE 64 /** * Get the size of the resulting hash value in bytes. * - * The pointer passed to av_hash_final have space for at least this many bytes. + * The maximum value this function will currently return is available as macro + * #AV_HASH_MAX_SIZE. + * + * @param[in] ctx Hash context + * @return Size of the hash value in bytes */ int av_hash_get_size(const struct AVHashContext *ctx); /** * Initialize or reset a hash context. + * + * @param[in,out] ctx Hash context */ void av_hash_init(struct AVHashContext *ctx); /** * Update a hash context with additional data. + * + * @param[in,out] ctx Hash context + * @param[in] src Data to be added to the hash context + * @param[in] len Size of the additional data */ void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len); /** * Finalize a hash context and compute the actual hash value. + * + * The minimum size of `dst` buffer is given by av_hash_get_size() or + * #AV_HASH_MAX_SIZE. The use of the latter macro is discouraged. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * + * @see av_hash_final_bin() provides an alternative API */ void av_hash_final(struct AVHashContext *ctx, uint8_t *dst); /** - * Finalize a hash context and compute the actual hash value. - * If size is smaller than the hash size, the hash is truncated; - * if size is larger, the buffer is padded with 0. + * Finalize a hash context and store the actual hash value in a buffer. 
+ * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * + * If `size` is smaller than the hash size (given by av_hash_get_size()), the + * hash is truncated; if size is larger, the buffer is padded with 0. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Number of bytes to write to `dst` */ void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size); /** - * Finalize a hash context and compute the actual hash value as a hex string. + * Finalize a hash context and store the hexadecimal representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * * The string is always 0-terminated. - * If size is smaller than 2 * hash_size + 1, the hex string is truncated. + * + * If `size` is smaller than `2 * hash_size + 1`, where `hash_size` is the + * value returned by av_hash_get_size(), the string will be truncated. + * + * @param[in,out] ctx Hash context + * @param[out] dst Where the string will be stored + * @param[in] size Maximum number of bytes to write to `dst` */ void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size); /** - * Finalize a hash context and compute the actual hash value as a base64 string. + * Finalize a hash context and store the Base64 representation of the + * actual hash value as a string. + * + * It is not safe to update or finalize a hash context again, if it has already + * been finalized. + * * The string is always 0-terminated. - * If size is smaller than AV_BASE64_SIZE(hash_size), the base64 string is - * truncated. + * + * If `size` is smaller than AV_BASE64_SIZE(hash_size), where `hash_size` is + * the value returned by av_hash_get_size(), the string will be truncated. 
+ * + * @param[in,out] ctx Hash context + * @param[out] dst Where the final hash value will be stored + * @param[in] size Maximum number of bytes to write to `dst` */ void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size); /** - * Free hash context. + * Free hash context and set hash context pointer to `NULL`. + * + * @param[in,out] ctx Pointer to hash context */ void av_hash_freep(struct AVHashContext **ctx); +/** + * @} + * @} + */ + #endif /* AVUTIL_HASH_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h index 4e9da0224d..03334e20e0 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h @@ -29,6 +29,11 @@ enum AVHWDeviceType { AV_HWDEVICE_TYPE_CUDA, AV_HWDEVICE_TYPE_VAAPI, AV_HWDEVICE_TYPE_DXVA2, + AV_HWDEVICE_TYPE_QSV, + AV_HWDEVICE_TYPE_VIDEOTOOLBOX, + AV_HWDEVICE_TYPE_NONE, + AV_HWDEVICE_TYPE_D3D11VA, + AV_HWDEVICE_TYPE_DRM, }; typedef struct AVHWDeviceInternal AVHWDeviceInternal; @@ -222,10 +227,36 @@ typedef struct AVHWFramesContext { } AVHWFramesContext; /** - * Allocate an AVHWDeviceContext for a given pixel format. + * Look up an AVHWDeviceType by name. * - * @param format a hwaccel pixel format (AV_PIX_FMT_FLAG_HWACCEL must be set - * on the corresponding format descriptor) + * @param name String name of the device type (case-insensitive). + * @return The type from enum AVHWDeviceType, or AV_HWDEVICE_TYPE_NONE if + * not found. + */ +enum AVHWDeviceType av_hwdevice_find_type_by_name(const char *name); + +/** Get the string name of an AVHWDeviceType. + * + * @param type Type from enum AVHWDeviceType. + * @return Pointer to a static string containing the name, or NULL if the type + * is not valid. + */ +const char *av_hwdevice_get_type_name(enum AVHWDeviceType type); + +/** + * Iterate over supported device types. 
+ * + * @param type AV_HWDEVICE_TYPE_NONE initially, then the previous type + * returned by this function in subsequent iterations. + * @return The next usable device type from enum AVHWDeviceType, or + * AV_HWDEVICE_TYPE_NONE if there are no more. + */ +enum AVHWDeviceType av_hwdevice_iterate_types(enum AVHWDeviceType prev); + +/** + * Allocate an AVHWDeviceContext for a given hardware type. + * + * @param type the type of the hardware device to allocate. * @return a reference to the newly created AVHWDeviceContext on success or NULL * on failure. */ @@ -269,6 +300,32 @@ int av_hwdevice_ctx_init(AVBufferRef *ref); int av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType type, const char *device, AVDictionary *opts, int flags); +/** + * Create a new device of the specified type from an existing device. + * + * If the source device is a device of the target type or was originally + * derived from such a device (possibly through one or more intermediate + * devices of other types), then this will return a reference to the + * existing device of the same type as is requested. + * + * Otherwise, it will attempt to derive a new device from the given source + * device. If direct derivation to the new type is not implemented, it will + * attempt the same derivation from each ancestor of the source device in + * turn looking for an implemented derivation method. + * + * @param dst_ctx On success, a reference to the newly-created + * AVHWDeviceContext. + * @param type The type of the new device to create. + * @param src_ctx A reference to an existing AVHWDeviceContext which will be + * used to create the new device. + * @param flags Currently unused; should be set to zero. + * @return Zero on success, a negative AVERROR code on failure. + */ +int av_hwdevice_ctx_create_derived(AVBufferRef **dst_ctx, + enum AVHWDeviceType type, + AVBufferRef *src_ctx, int flags); + + /** * Allocate an AVHWFramesContext tied to a given device context. 
* @@ -317,6 +374,14 @@ int av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags); * If dst->format is set, then this format will be used, otherwise (when * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen. * + * The two frames must have matching allocated dimensions (i.e. equal to + * AVHWFramesContext.width/height), since not all device types support + * transferring a sub-rectangle of the whole surface. The display dimensions + * (i.e. AVFrame.width/height) may be smaller than the allocated dimensions, but + * also have to be equal for both frames. When the display dimensions are + * smaller than the allocated dimensions, the content of the padding in the + * destination frame is unspecified. + * * @param dst the destination frame. dst is not touched on failure. * @param src the source frame. * @param flags currently unused, should be set to zero @@ -409,7 +474,7 @@ void *av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx); * configuration is provided, returns the maximum possible capabilities * of the device. * - * @param device_ctx a reference to the associated AVHWDeviceContext. + * @param ref a reference to the associated AVHWDeviceContext. * @param hwconfig a filled HW-specific configuration structure, or NULL * to return the maximum possible capabilities of the device. * @return AVHWFramesConstraints structure describing the constraints @@ -425,4 +490,93 @@ AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, */ void av_hwframe_constraints_free(AVHWFramesConstraints **constraints); + +/** + * Flags to apply to frame mappings. + */ +enum { + /** + * The mapping must be readable. + */ + AV_HWFRAME_MAP_READ = 1 << 0, + /** + * The mapping must be writeable. + */ + AV_HWFRAME_MAP_WRITE = 1 << 1, + /** + * The mapped frame will be overwritten completely in subsequent + * operations, so the current frame data need not be loaded. Any values + * which are not overwritten are unspecified. 
+ */ + AV_HWFRAME_MAP_OVERWRITE = 1 << 2, + /** + * The mapping must be direct. That is, there must not be any copying in + * the map or unmap steps. Note that performance of direct mappings may + * be much lower than normal memory. + */ + AV_HWFRAME_MAP_DIRECT = 1 << 3, +}; + +/** + * Map a hardware frame. + * + * This has a number of different possible effects, depending on the format + * and origin of the src and dst frames. On input, src should be a usable + * frame with valid buffers and dst should be blank (typically as just created + * by av_frame_alloc()). src should have an associated hwframe context, and + * dst may optionally have a format and associated hwframe context. + * + * If src was created by mapping a frame from the hwframe context of dst, + * then this function undoes the mapping - dst is replaced by a reference to + * the frame that src was originally mapped from. + * + * If both src and dst have an associated hwframe context, then this function + * attempts to map the src frame from its hardware context to that of dst and + * then fill dst with appropriate data to be usable there. This will only be + * possible if the hwframe contexts and associated devices are compatible - + * given compatible devices, av_hwframe_ctx_create_derived() can be used to + * create a hwframe context for dst in which mapping should be possible. + * + * If src has a hwframe context but dst does not, then the src frame is + * mapped to normal memory and should thereafter be usable as a normal frame. + * If the format is set on dst, then the mapping will attempt to create dst + * with that format and fail if it is not possible. If format is unset (is + * AV_PIX_FMT_NONE) then dst will be mapped with whatever the most appropriate + * format to use is (probably the sw_format of the src hwframe context). 
+ * + * A return value of AVERROR(ENOSYS) indicates that the mapping is not + * possible with the given arguments and hwframe setup, while other return + * values indicate that it failed somehow. + * + * @param dst Destination frame, to contain the mapping. + * @param src Source frame, to be mapped. + * @param flags Some combination of AV_HWFRAME_MAP_* flags. + * @return Zero on success, negative AVERROR code on failure. + */ +int av_hwframe_map(AVFrame *dst, const AVFrame *src, int flags); + + +/** + * Create and initialise an AVHWFramesContext as a mapping of another existing + * AVHWFramesContext on a different device. + * + * av_hwframe_ctx_init() should not be called after this. + * + * @param derived_frame_ctx On success, a reference to the newly created + * AVHWFramesContext. + * @param derived_device_ctx A reference to the device to create the new + * AVHWFramesContext on. + * @param source_frame_ctx A reference to an existing AVHWFramesContext + * which will be mapped to the derived context. + * @param flags Some combination of AV_HWFRAME_MAP_* flags, defining the + * mapping parameters to apply to frames which are allocated + * in the derived device. + * @return Zero on success, negative AVERROR code on failure. + */ +int av_hwframe_ctx_create_derived(AVBufferRef **derived_frame_ctx, + enum AVPixelFormat format, + AVBufferRef *derived_device_ctx, + AVBufferRef *source_frame_ctx, + int flags); + #endif /* AVUTIL_HWCONTEXT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h index 23a77cee73..12dae8449e 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h @@ -20,7 +20,9 @@ #ifndef AVUTIL_HWCONTEXT_CUDA_H #define AVUTIL_HWCONTEXT_CUDA_H +#ifndef CUDA_VERSION #include +#endif #include "pixfmt.h" @@ -32,11 +34,14 @@ * AVBufferRefs whose data pointer is a CUdeviceptr. 
*/ +typedef struct AVCUDADeviceContextInternal AVCUDADeviceContextInternal; + /** * This struct is allocated as AVHWDeviceContext.hwctx */ typedef struct AVCUDADeviceContext { CUcontext cuda_ctx; + AVCUDADeviceContextInternal *internal; } AVCUDADeviceContext; /** diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_d3d11va.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_d3d11va.h new file mode 100644 index 0000000000..98db7ce343 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_d3d11va.h @@ -0,0 +1,168 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_D3D11VA_H +#define AVUTIL_HWCONTEXT_D3D11VA_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_D3D11VA. + * + * The default pool implementation will be fixed-size if initial_pool_size is + * set (and allocate elements from an array texture). Otherwise it will allocate + * individual textures. Be aware that decoding requires a single array texture. + * + * Using sw_format==AV_PIX_FMT_YUV420P has special semantics, and maps to + * DXGI_FORMAT_420_OPAQUE. av_hwframe_transfer_data() is not supported for + * this format. Refer to MSDN for details. 
+ * + * av_hwdevice_ctx_create() for this device type supports a key named "debug" + * for the AVDictionary entry. If this is set to any value, the device creation + * code will try to load various supported D3D debugging layers. + */ + +#include + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVD3D11VADeviceContext { + /** + * Device used for texture creation and access. This can also be used to + * set the libavcodec decoding device. + * + * Must be set by the user. This is the only mandatory field - the other + * device context fields are set from this and are available for convenience. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11Device *device; + + /** + * If unset, this will be set from the device field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11DeviceContext *device_context; + + /** + * If unset, this will be set from the device field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11VideoDevice *video_device; + + /** + * If unset, this will be set from the device_context field on init. + * + * Deallocating the AVHWDeviceContext will always release this interface, + * and it does not matter whether it was user-allocated. + */ + ID3D11VideoContext *video_context; + + /** + * Callbacks for locking. They protect accesses to device_context and + * video_context calls. They also protect access to the internal staging + * texture (for av_hwframe_transfer_data() calls). They do NOT protect + * access to hwcontext or decoder state in general. + * + * If unset on init, the hwcontext implementation will set them to use an + * internal mutex. + * + * The underlying lock must be recursive. 
lock_ctx is for free use by the + * locking implementation. + */ + void (*lock)(void *lock_ctx); + void (*unlock)(void *lock_ctx); + void *lock_ctx; +} AVD3D11VADeviceContext; + +/** + * D3D11 frame descriptor for pool allocation. + * + * In user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer pointing at an object of this type describing the + * planes of the frame. + * + * This has no use outside of custom allocation, and AVFrame AVBufferRef do not + * necessarily point to an instance of this struct. + */ +typedef struct AVD3D11FrameDescriptor { + /** + * The texture in which the frame is located. The reference count is + * managed by the AVBufferRef, and destroying the reference will release + * the interface. + * + * Normally stored in AVFrame.data[0]. + */ + ID3D11Texture2D *texture; + + /** + * The index into the array texture element representing the frame, or 0 + * if the texture is not an array texture. + * + * Normally stored in AVFrame.data[1] (cast from intptr_t). + */ + intptr_t index; +} AVD3D11FrameDescriptor; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVD3D11VAFramesContext { + /** + * The canonical texture used for pool allocation. If this is set to NULL + * on init, the hwframes implementation will allocate and set an array + * texture if initial_pool_size > 0. + * + * The only situation when the API user should set this is: + * - the user wants to do manual pool allocation (setting + * AVHWFramesContext.pool), instead of letting AVHWFramesContext + * allocate the pool + * - of an array texture + * - and wants it to use it for decoding + * - this has to be done before calling av_hwframe_ctx_init() + * + * Deallocating the AVHWFramesContext will always release this interface, + * and it does not matter whether it was user-allocated. + * + * This is in particular used by the libavcodec D3D11VA hwaccel, which + * requires a single array texture. 
It will create ID3D11VideoDecoderOutputView + * objects for each array texture element on decoder initialization. + */ + ID3D11Texture2D *texture; + + /** + * D3D11_TEXTURE2D_DESC.BindFlags used for texture creation. The user must + * at least set D3D11_BIND_DECODER if the frames context is to be used for + * video decoding. + * This field is ignored/invalid if a user-allocated texture is provided. + */ + UINT BindFlags; + + /** + * D3D11_TEXTURE2D_DESC.MiscFlags used for texture creation. + * This field is ignored/invalid if a user-allocated texture is provided. + */ + UINT MiscFlags; +} AVD3D11VAFramesContext; + +#endif /* AVUTIL_HWCONTEXT_D3D11VA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_drm.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_drm.h new file mode 100644 index 0000000000..2e225451e1 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_drm.h @@ -0,0 +1,166 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_DRM_H +#define AVUTIL_HWCONTEXT_DRM_H + +#include +#include + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_DRM. + * + * Internal frame allocation is not currently supported - all frames + * must be allocated by the user. 
Thus AVHWFramesContext is always + * NULL, though this may change if support for frame allocation is + * added in future. + */ + +enum { + /** + * The maximum number of layers/planes in a DRM frame. + */ + AV_DRM_MAX_PLANES = 4 +}; + +/** + * DRM object descriptor. + * + * Describes a single DRM object, addressing it as a PRIME file + * descriptor. + */ +typedef struct AVDRMObjectDescriptor { + /** + * DRM PRIME fd for the object. + */ + int fd; + /** + * Total size of the object. + * + * (This includes any parts not which do not contain image data.) + */ + size_t size; + /** + * Format modifier applied to the object (DRM_FORMAT_MOD_*). + */ + uint64_t format_modifier; +} AVDRMObjectDescriptor; + +/** + * DRM plane descriptor. + * + * Describes a single plane of a layer, which is contained within + * a single object. + */ +typedef struct AVDRMPlaneDescriptor { + /** + * Index of the object containing this plane in the objects + * array of the enclosing frame descriptor. + */ + int object_index; + /** + * Offset within that object of this plane. + */ + ptrdiff_t offset; + /** + * Pitch (linesize) of this plane. + */ + ptrdiff_t pitch; +} AVDRMPlaneDescriptor; + +/** + * DRM layer descriptor. + * + * Describes a single layer within a frame. This has the structure + * defined by its format, and will contain one or more planes. + */ +typedef struct AVDRMLayerDescriptor { + /** + * Format of the layer (DRM_FORMAT_*). + */ + uint32_t format; + /** + * Number of planes in the layer. + * + * This must match the number of planes required by format. + */ + int nb_planes; + /** + * Array of planes in this layer. + */ + AVDRMPlaneDescriptor planes[AV_DRM_MAX_PLANES]; +} AVDRMLayerDescriptor; + +/** + * DRM frame descriptor. + * + * This is used as the data pointer for AV_PIX_FMT_DRM_PRIME frames. + * It is also used by user-allocated frame pools - allocating in + * AVHWFramesContext.pool must return AVBufferRefs which contain + * an object of this type. 
+ * + * The fields of this structure should be set such it can be + * imported directly by EGL using the EGL_EXT_image_dma_buf_import + * and EGL_EXT_image_dma_buf_import_modifiers extensions. + * (Note that the exact layout of a particular format may vary between + * platforms - we only specify that the same platform should be able + * to import it.) + * + * The total number of planes must not exceed AV_DRM_MAX_PLANES, and + * the order of the planes by increasing layer index followed by + * increasing plane index must be the same as the order which would + * be used for the data pointers in the equivalent software format. + */ +typedef struct AVDRMFrameDescriptor { + /** + * Number of DRM objects making up this frame. + */ + int nb_objects; + /** + * Array of objects making up the frame. + */ + AVDRMObjectDescriptor objects[AV_DRM_MAX_PLANES]; + /** + * Number of layers in the frame. + */ + int nb_layers; + /** + * Array of layers in the frame. + */ + AVDRMLayerDescriptor layers[AV_DRM_MAX_PLANES]; +} AVDRMFrameDescriptor; + +/** + * DRM device. + * + * Allocated as AVHWDeviceContext.hwctx. + */ +typedef struct AVDRMDeviceContext { + /** + * File descriptor of DRM device. + * + * This is used as the device to create frames on, and may also be + * used in some derivation and mapping operations. + * + * If no device is required, set to -1. + */ + int fd; +} AVDRMDeviceContext; + +#endif /* AVUTIL_HWCONTEXT_DRM_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h index 6c36cb4b6b..e1b79bc0de 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h @@ -65,6 +65,9 @@ typedef struct AVDXVA2FramesContext { * * If it is non-NULL, libavutil will call IDirectXVideoDecoder_Release() on * it just before the internal surface pool is freed. + * + * This is for convenience only. 
Some code uses other methods to manage the + * decoder reference. */ IDirectXVideoDecoder *decoder_to_release; } AVDXVA2FramesContext; diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_qsv.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_qsv.h new file mode 100644 index 0000000000..b98d611cfc --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_qsv.h @@ -0,0 +1,53 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_QSV_H +#define AVUTIL_HWCONTEXT_QSV_H + +#include + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_QSV. + * + * This API does not support dynamic frame pools. AVHWFramesContext.pool must + * contain AVBufferRefs whose data pointer points to an mfxFrameSurface1 struct. + */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVQSVDeviceContext { + mfxSession session; +} AVQSVDeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVQSVFramesContext { + mfxFrameSurface1 *surfaces; + int nb_surfaces; + + /** + * A combination of MFX_MEMTYPE_* describing the frame pool. 
+ */ + int frame_type; +} AVQSVFramesContext; + +#endif /* AVUTIL_HWCONTEXT_QSV_H */ + diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h index 7fd1a36e8f..0b2e071cb3 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h @@ -33,6 +33,33 @@ * with the data pointer set to a VASurfaceID. */ +enum { + /** + * The quirks field has been set by the user and should not be detected + * automatically by av_hwdevice_ctx_init(). + */ + AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0), + /** + * The driver does not destroy parameter buffers when they are used by + * vaRenderPicture(). Additional code will be required to destroy them + * separately afterwards. + */ + AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1), + + /** + * The driver does not support the VASurfaceAttribMemoryType attribute, + * so the surface allocation code will not try to use it. + */ + AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2), + + /** + * The driver does not support surface attributes at all. + * The surface allocation code will never pass them to surface allocation, + * and the results of the vaQuerySurfaceAttributes() call will be faked. + */ + AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES = (1 << 3), +}; + /** * VAAPI connection details. * @@ -43,6 +70,14 @@ typedef struct AVVAAPIDeviceContext { * The VADisplay handle, to be filled by the user. */ VADisplay display; + /** + * Driver quirks to apply - this is filled by av_hwdevice_ctx_init(), + * with reference to a table of known drivers, unless the + * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user + * may need to refer to this field when performing any later + * operations using VAAPI with the same VADisplay. 
+ */ + unsigned int driver_quirks; } AVVAAPIDeviceContext; /** diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_videotoolbox.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_videotoolbox.h new file mode 100644 index 0000000000..380918d92e --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_videotoolbox.h @@ -0,0 +1,54 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H +#define AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H + +#include + +#include + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VIDEOTOOLBOX. + * + * This API currently does not support frame allocation, as the raw VideoToolbox + * API does allocation, and FFmpeg itself never has the need to allocate frames. + * + * If the API user sets a custom pool, AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CVImageBufferRef or CVPixelBufferRef. + * + * Currently AVHWDeviceContext.hwctx and AVHWFramesContext.hwctx are always + * NULL. + */ + +/** + * Convert a VideoToolbox (actually CoreVideo) format to AVPixelFormat. + * Returns AV_PIX_FMT_NONE if no known equivalent was found. 
+ */ +enum AVPixelFormat av_map_videotoolbox_format_to_pixfmt(uint32_t cv_fmt); + +/** + * Convert an AVPixelFormat to a VideoToolbox (actually CoreVideo) format. + * Returns 0 if no known equivalent was found. + */ +uint32_t av_map_videotoolbox_format_from_pixfmt(enum AVPixelFormat pix_fmt); + +#endif /* AVUTIL_HWCONTEXT_VIDEOTOOLBOX_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/imgutils.h b/third-party/FFmpeg-iOS/include/libavutil/imgutils.h index 23282a38fa..5b790ecf0a 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/imgutils.h +++ b/third-party/FFmpeg-iOS/include/libavutil/imgutils.h @@ -120,6 +120,24 @@ void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], const uint8_t *src_data[4], const int src_linesizes[4], enum AVPixelFormat pix_fmt, int width, int height); +/** + * Copy image data located in uncacheable (e.g. GPU mapped) memory. Where + * available, this function will use special functionality for reading from such + * memory, which may result in greatly improved performance compared to plain + * av_image_copy(). + * + * The data pointers and the linesizes must be aligned to the maximum required + * by the CPU architecture. + * + * @note The linesize parameters have the type ptrdiff_t here, while they are + * int for av_image_copy(). + * @note On x86, the linesizes currently need to be aligned to the cacheline + * size (i.e. 64) to get improved performance. + */ +void av_image_copy_uc_from(uint8_t *dst_data[4], const ptrdiff_t dst_linesizes[4], + const uint8_t *src_data[4], const ptrdiff_t src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + /** * Setup the data pointers and linesizes based on the specified image * parameters and the provided array. @@ -137,7 +155,7 @@ void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], * one call, use av_image_alloc(). 
* * @param dst_data data pointers to be filled in - * @param dst_linesizes linesizes for the image in dst_data to be filled in + * @param dst_linesize linesizes for the image in dst_data to be filled in * @param src buffer which will contain or contains the actual image data, can be NULL * @param pix_fmt the pixel format of the image * @param width the width of the image in pixels @@ -154,7 +172,11 @@ int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], * Return the size in bytes of the amount of data required to store an * image with the given parameters. * - * @param[in] align the assumed linesize alignment + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the assumed linesize alignment + * @return the buffer size in bytes, a negative error code in case of failure */ int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); @@ -167,7 +189,7 @@ int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, * @param dst a buffer into which picture data will be copied * @param dst_size the size in bytes of dst * @param src_data pointers containing the source image data - * @param src_linesizes linesizes for the image in src_data + * @param src_linesize linesizes for the image in src_data * @param pix_fmt the pixel format of the source image * @param width the width of the source image in pixels * @param height the height of the source image in pixels @@ -191,6 +213,21 @@ int av_image_copy_to_buffer(uint8_t *dst, int dst_size, */ int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of a plane of an image with the specified pix_fmt can be addressed + * with a signed int. 
+ * + * @param w the width of the picture + * @param h the height of the picture + * @param max_pixels the maximum number of pixels the user wants to accept + * @param pix_fmt the pixel format, can be AV_PIX_FMT_NONE if unknown. + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size2(unsigned int w, unsigned int h, int64_t max_pixels, enum AVPixelFormat pix_fmt, int log_offset, void *log_ctx); + /** * Check if the given sample aspect ratio of an image is valid. * @@ -205,6 +242,33 @@ int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *lo */ int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar); +/** + * Overwrite the image data with black. This is suitable for filling a + * sub-rectangle of an image, meaning the padding between the right most pixel + * and the left most pixel on the next line will not be overwritten. For some + * formats, the image size might be rounded up due to inherent alignment. + * + * If the pixel format has alpha, the alpha is cleared to opaque. + * + * This can return an error if the pixel format is not supported. Normally, all + * non-hwaccel pixel formats should be supported. + * + * Passing NULL for dst_data is allowed. Then the function returns whether the + * operation would have succeeded. (It can return an error if the pix_fmt is + * not supported.) 
+ * + * @param dst_data data pointers to destination image + * @param dst_linesize linesizes for the destination image + * @param pix_fmt the pixel format of the image + * @param range the color range of the image (important for colorspaces such as YUV) + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @return 0 if the image data was cleared, a negative AVERROR code otherwise + */ +int av_image_fill_black(uint8_t *dst_data[4], const ptrdiff_t dst_linesize[4], + enum AVPixelFormat pix_fmt, enum AVColorRange range, + int width, int height); + /** * @} */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h b/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h index 07f717692a..e3403cc035 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h +++ b/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h @@ -20,7 +20,7 @@ #define AVUTIL_INTREADWRITE_H #include -#include "../libavutilavconfig.h" +#include "../libavutil/avconfig.h" #include "attributes.h" #include "bswap.h" @@ -229,6 +229,11 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; # define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) # define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) +#elif defined(_MSC_VER) && (defined(_M_ARM) || defined(_M_X64)) && AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + #elif AV_HAVE_FAST_UNALIGNED # define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) @@ -242,8 +247,8 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; ((const uint8_t*)(x))[1]) #endif #ifndef AV_WB16 -# define AV_WB16(p, darg) do { \ - unsigned d = (darg); \ +# define AV_WB16(p, val) do { \ + uint16_t d = (val); \ ((uint8_t*)(p))[1] = (d); \ ((uint8_t*)(p))[0] = (d)>>8; \ } while(0) @@ -255,8 +260,8 @@ union unaligned_16 { uint16_t l; } 
__attribute__((packed)) av_alias; ((const uint8_t*)(x))[0]) #endif #ifndef AV_WL16 -# define AV_WL16(p, darg) do { \ - unsigned d = (darg); \ +# define AV_WL16(p, val) do { \ + uint16_t d = (val); \ ((uint8_t*)(p))[0] = (d); \ ((uint8_t*)(p))[1] = (d)>>8; \ } while(0) @@ -270,8 +275,8 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; ((const uint8_t*)(x))[3]) #endif #ifndef AV_WB32 -# define AV_WB32(p, darg) do { \ - unsigned d = (darg); \ +# define AV_WB32(p, val) do { \ + uint32_t d = (val); \ ((uint8_t*)(p))[3] = (d); \ ((uint8_t*)(p))[2] = (d)>>8; \ ((uint8_t*)(p))[1] = (d)>>16; \ @@ -287,8 +292,8 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; ((const uint8_t*)(x))[0]) #endif #ifndef AV_WL32 -# define AV_WL32(p, darg) do { \ - unsigned d = (darg); \ +# define AV_WL32(p, val) do { \ + uint32_t d = (val); \ ((uint8_t*)(p))[0] = (d); \ ((uint8_t*)(p))[1] = (d)>>8; \ ((uint8_t*)(p))[2] = (d)>>16; \ @@ -308,8 +313,8 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; (uint64_t)((const uint8_t*)(x))[7]) #endif #ifndef AV_WB64 -# define AV_WB64(p, darg) do { \ - uint64_t d = (darg); \ +# define AV_WB64(p, val) do { \ + uint64_t d = (val); \ ((uint8_t*)(p))[7] = (d); \ ((uint8_t*)(p))[6] = (d)>>8; \ ((uint8_t*)(p))[5] = (d)>>16; \ @@ -333,8 +338,8 @@ union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; (uint64_t)((const uint8_t*)(x))[0]) #endif #ifndef AV_WL64 -# define AV_WL64(p, darg) do { \ - uint64_t d = (darg); \ +# define AV_WL64(p, val) do { \ + uint64_t d = (val); \ ((uint8_t*)(p))[0] = (d); \ ((uint8_t*)(p))[1] = (d)>>8; \ ((uint8_t*)(p))[2] = (d)>>16; \ diff --git a/third-party/FFmpeg-iOS/include/libavutil/lfg.h b/third-party/FFmpeg-iOS/include/libavutil/lfg.h index ec90562cf2..03f779ad8a 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/lfg.h +++ b/third-party/FFmpeg-iOS/include/libavutil/lfg.h @@ -22,6 +22,8 @@ #ifndef AVUTIL_LFG_H #define AVUTIL_LFG_H +#include + typedef 
struct AVLFG { unsigned int state[64]; int index; @@ -29,6 +31,13 @@ typedef struct AVLFG { void av_lfg_init(AVLFG *c, unsigned int seed); +/** + * Seed the state of the ALFG using binary data. + * + * Return value: 0 on success, negative value (AVERROR) on failure. + */ +int av_lfg_init_from_data(AVLFG *c, const uint8_t *data, unsigned int length); + /** * Get the next random unsigned 32-bit number using an ALFG. * diff --git a/third-party/FFmpeg-iOS/include/libavutil/log.h b/third-party/FFmpeg-iOS/include/libavutil/log.h index 0acc1b9214..f0a57385df 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/log.h +++ b/third-party/FFmpeg-iOS/include/libavutil/log.h @@ -44,7 +44,7 @@ typedef enum { AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT, AV_CLASS_CATEGORY_DEVICE_OUTPUT, AV_CLASS_CATEGORY_DEVICE_INPUT, - AV_CLASS_CATEGORY_NB, ///< not part of ABI/API + AV_CLASS_CATEGORY_NB ///< not part of ABI/API }AVClassCategory; #define AV_IS_INPUT_DEVICE(category) \ diff --git a/third-party/FFmpeg-iOS/include/libavutil/lzo.h b/third-party/FFmpeg-iOS/include/libavutil/lzo.h new file mode 100644 index 0000000000..c03403992d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/lzo.h @@ -0,0 +1,66 @@ +/* + * LZO 1x decompression + * copyright (c) 2006 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LZO_H +#define AVUTIL_LZO_H + +/** + * @defgroup lavu_lzo LZO + * @ingroup lavu_crypto + * + * @{ + */ + +#include + +/** @name Error flags returned by av_lzo1x_decode + * @{ */ +/// end of the input buffer reached before decoding finished +#define AV_LZO_INPUT_DEPLETED 1 +/// decoded data did not fit into output buffer +#define AV_LZO_OUTPUT_FULL 2 +/// a reference to previously decoded data was wrong +#define AV_LZO_INVALID_BACKPTR 4 +/// a non-specific error in the compressed bitstream +#define AV_LZO_ERROR 8 +/** @} */ + +#define AV_LZO_INPUT_PADDING 8 +#define AV_LZO_OUTPUT_PADDING 12 + +/** + * @brief Decodes LZO 1x compressed data. + * @param out output buffer + * @param outlen size of output buffer, number of bytes left are returned here + * @param in input buffer + * @param inlen size of input buffer, number of bytes left are returned here + * @return 0 on success, otherwise a combination of the error flags above + * + * Make sure all buffers are appropriately padded, in must provide + * AV_LZO_INPUT_PADDING, out must provide AV_LZO_OUTPUT_PADDING additional bytes. 
+ */ +int av_lzo1x_decode(void *out, int *outlen, const void *in, int *inlen); + +/** + * @} + */ + +#endif /* AVUTIL_LZO_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h b/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h index 936533fec4..847b0b62c6 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h +++ b/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h @@ -86,4 +86,43 @@ AVMasteringDisplayMetadata *av_mastering_display_metadata_alloc(void); */ AVMasteringDisplayMetadata *av_mastering_display_metadata_create_side_data(AVFrame *frame); +/** + * Content light level needed by to transmit HDR over HDMI (CTA-861.3). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with av_content_light_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVContentLightMetadata { + /** + * Max content light level (cd/m^2). + */ + unsigned MaxCLL; + + /** + * Max average light level per frame (cd/m^2). + */ + unsigned MaxFALL; +} AVContentLightMetadata; + +/** + * Allocate an AVContentLightMetadata structure and set its fields to + * default values. The resulting struct can be freed using av_freep(). + * + * @return An AVContentLightMetadata filled with default values or NULL + * on failure. + */ +AVContentLightMetadata *av_content_light_metadata_alloc(size_t *size); + +/** + * Allocate a complete AVContentLightMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVContentLightMetadata structure to be filled by caller. 
+ */ +AVContentLightMetadata *av_content_light_metadata_create_side_data(AVFrame *frame); + #endif /* AVUTIL_MASTERING_DISPLAY_METADATA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/mathematics.h b/third-party/FFmpeg-iOS/include/libavutil/mathematics.h index 57c44f845d..54901800ba 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/mathematics.h +++ b/third-party/FFmpeg-iOS/include/libavutil/mathematics.h @@ -18,6 +18,12 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @addtogroup lavu_math + * Mathematical utilities for working with timestamp and time base. + */ + #ifndef AVUTIL_MATHEMATICS_H #define AVUTIL_MATHEMATICS_H @@ -63,84 +69,155 @@ /** * @addtogroup lavu_math + * * @{ */ - +/** + * Rounding methods. + */ enum AVRounding { AV_ROUND_ZERO = 0, ///< Round toward zero. AV_ROUND_INF = 1, ///< Round away from zero. AV_ROUND_DOWN = 2, ///< Round toward -infinity. AV_ROUND_UP = 3, ///< Round toward +infinity. AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. - AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE + /** + * Flag telling rescaling functions to pass `INT64_MIN`/`MAX` through + * unchanged, avoiding special cases for #AV_NOPTS_VALUE. + * + * Unlike other values of the enumeration AVRounding, this value is a + * bitmask that must be used in conjunction with another value of the + * enumeration through a bitwise OR, in order to set behavior for normal + * cases. 
+ * + * @code{.c} + * av_rescale_rnd(3, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling 3: + * // Calculating 3 * 1 / 2 + * // 3 / 2 is rounded up to 2 + * // => 2 + * + * av_rescale_rnd(AV_NOPTS_VALUE, 1, 2, AV_ROUND_UP | AV_ROUND_PASS_MINMAX); + * // Rescaling AV_NOPTS_VALUE: + * // AV_NOPTS_VALUE == INT64_MIN + * // AV_NOPTS_VALUE is passed through + * // => AV_NOPTS_VALUE + * @endcode + */ + AV_ROUND_PASS_MINMAX = 8192, }; /** - * Compute the greatest common divisor of a and b. + * Compute the greatest common divisor of two integer operands. * - * @return gcd of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0; + * @param a,b Operands + * @return GCD of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0; * if a == 0 and b == 0, returns 0. */ int64_t av_const av_gcd(int64_t a, int64_t b); /** * Rescale a 64-bit integer with rounding to nearest. - * A simple a*b/c isn't possible as it can overflow. + * + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow. + * + * This function is equivalent to av_rescale_rnd() with #AV_ROUND_NEAR_INF. + * + * @see av_rescale_rnd(), av_rescale_q(), av_rescale_q_rnd() */ int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; /** * Rescale a 64-bit integer with specified rounding. - * A simple a*b/c isn't possible as it can overflow. * - * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is - * INT64_MIN or INT64_MAX then a is passed through unchanged. + * The operation is mathematically equivalent to `a * b / c`, but writing that + * directly can overflow, and does not support different rounding methods. + * + * @see av_rescale(), av_rescale_q(), av_rescale_q_rnd() */ -int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const; +int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd) av_const; /** * Rescale a 64-bit integer by 2 rational numbers. 
+ * + * The operation is mathematically equivalent to `a * bq / cq`. + * + * This function is equivalent to av_rescale_q_rnd() with #AV_ROUND_NEAR_INF. + * + * @see av_rescale(), av_rescale_rnd(), av_rescale_q_rnd() */ int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; /** * Rescale a 64-bit integer by 2 rational numbers with specified rounding. * - * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is - * INT64_MIN or INT64_MAX then a is passed through unchanged. + * The operation is mathematically equivalent to `a * bq / cq`. + * + * @see av_rescale(), av_rescale_rnd(), av_rescale_q() */ int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, - enum AVRounding) av_const; + enum AVRounding rnd) av_const; /** - * Compare 2 timestamps each in its own timebases. - * The result of the function is undefined if one of the timestamps - * is outside the int64_t range when represented in the others timebase. - * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position + * Compare two timestamps each in its own time base. + * + * @return One of the following values: + * - -1 if `ts_a` is before `ts_b` + * - 1 if `ts_a` is after `ts_b` + * - 0 if they represent the same position + * + * @warning + * The result of the function is undefined if one of the timestamps is outside + * the `int64_t` range when represented in the other's timebase. */ int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); /** - * Compare 2 integers modulo mod. - * That is we compare integers a and b for which only the least - * significant log2(mod) bits are known. + * Compare the remainders of two integer operands divided by a common divisor. 
* - * @param mod must be a power of 2 - * @return a negative value if a is smaller than b - * a positive value if a is greater than b - * 0 if a equals b + * In other words, compare the least significant `log2(mod)` bits of integers + * `a` and `b`. + * + * @code{.c} + * av_compare_mod(0x11, 0x02, 0x10) < 0 // since 0x11 % 0x10 (0x1) < 0x02 % 0x10 (0x2) + * av_compare_mod(0x11, 0x02, 0x20) > 0 // since 0x11 % 0x20 (0x11) > 0x02 % 0x20 (0x02) + * @endcode + * + * @param a,b Operands + * @param mod Divisor; must be a power of 2 + * @return + * - a negative value if `a % mod < b % mod` + * - a positive value if `a % mod > b % mod` + * - zero if `a % mod == b % mod` */ int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); /** * Rescale a timestamp while preserving known durations. * - * @param in_ts Input timestamp - * @param in_tb Input timebase - * @param fs_tb Duration and *last timebase - * @param duration duration till the next call - * @param out_tb Output timebase + * This function is designed to be called per audio packet to scale the input + * timestamp to a different time base. Compared to a simple av_rescale_q() + * call, this function is robust against possible inconsistent frame durations. + * + * The `last` parameter is a state variable that must be preserved for all + * subsequent calls for the same stream. For the first call, `*last` should be + * initialized to #AV_NOPTS_VALUE. + * + * @param[in] in_tb Input time base + * @param[in] in_ts Input timestamp + * @param[in] fs_tb Duration time base; typically this is finer-grained + * (greater) than `in_tb` and `out_tb` + * @param[in] duration Duration till the next call to this function (i.e. 
+ * duration of the current packet/frame)
+ * @param[in,out] last Pointer to a timestamp expressed in terms of
+ * `fs_tb`, acting as a state variable
+ * @param[in] out_tb Output timebase
+ * @return Timestamp expressed in terms of `out_tb`
+ *
+ * @note In the context of this function, "duration" is in terms of samples, not
+ * seconds.
 */
int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration,
 int64_t *last, AVRational out_tb);
@@ -150,15 +227,15 @@ int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int
 * This function guarantees that when the same value is repeatly added that
 * no accumulation of rounding errors occurs.
 *
- * @param ts Input timestamp
- * @param ts_tb Input timestamp timebase
- * @param inc value to add to ts
- * @param inc_tb inc timebase
+ * @param[in] ts Input timestamp
+ * @param[in] ts_tb Input timestamp time base
+ * @param[in] inc Value to be added
+ * @param[in] inc_tb Time base of `inc`
 */
int64_t av_add_stable(AVRational ts_tb, int64_t ts,
 AVRational inc_tb, int64_t inc);
- /**
+/**
 * @}
 */
diff --git a/third-party/FFmpeg-iOS/include/libavutil/md5.h b/third-party/FFmpeg-iOS/include/libavutil/md5.h
index 79702c88c2..ca72ccbf83 100644
--- a/third-party/FFmpeg-iOS/include/libavutil/md5.h
+++ b/third-party/FFmpeg-iOS/include/libavutil/md5.h
@@ -18,9 +18,16 @@
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
+/**
+ * @file
+ * @ingroup lavu_md5
+ * Public header for MD5 hash function implementation.
+ */
+
 #ifndef AVUTIL_MD5_H
 #define AVUTIL_MD5_H
+#include <stddef.h>
 #include <stdint.h>
 #include "attributes.h"
@@ -28,7 +35,9 @@
 /**
 * @defgroup lavu_md5 MD5
- * @ingroup lavu_crypto
+ * @ingroup lavu_hash
+ * MD5 hash function implementation.
+ * * @{ */ @@ -55,7 +64,11 @@ void av_md5_init(struct AVMD5 *ctx); * @param src input data to update hash with * @param len input data length */ +#if FF_API_CRYPTO_SIZE_T void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); +#else +void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, size_t len); +#endif /** * Finish hashing and output digest value. @@ -72,7 +85,11 @@ void av_md5_final(struct AVMD5 *ctx, uint8_t *dst); * @param src The data to hash * @param len The length of the data, in bytes */ +#if FF_API_CRYPTO_SIZE_T void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); +#else +void av_md5_sum(uint8_t *dst, const uint8_t *src, size_t len); +#endif /** * @} diff --git a/third-party/FFmpeg-iOS/include/libavutil/mem.h b/third-party/FFmpeg-iOS/include/libavutil/mem.h index d25b3229b7..527cd03191 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/mem.h +++ b/third-party/FFmpeg-iOS/include/libavutil/mem.h @@ -20,7 +20,8 @@ /** * @file - * memory handling functions + * @ingroup lavu_mem + * Memory handling functions */ #ifndef AVUTIL_MEM_H @@ -35,9 +36,56 @@ /** * @addtogroup lavu_mem + * Utilities for manipulating memory. + * + * FFmpeg has several applications of memory that are not required of a typical + * program. For example, the computing-heavy components like video decoding and + * encoding can be sped up significantly through the use of aligned memory. + * + * However, for each of FFmpeg's applications of memory, there might not be a + * recognized or standardized API for that specific use. Memory alignment, for + * instance, varies wildly depending on operating systems, architectures, and + * compilers. Hence, this component of @ref libavutil is created to make + * dealing with memory consistently possible on all platforms. + * + * @{ + * + * @defgroup lavu_mem_macros Alignment Macros + * Helper macros for declaring aligned variables. 
* @{ */ +/** + * @def DECLARE_ALIGNED(n,t,v) + * Declare a variable that is aligned in memory. + * + * @code{.c} + * DECLARE_ALIGNED(16, uint16_t, aligned_int) = 42; + * DECLARE_ALIGNED(32, uint8_t, aligned_array)[128]; + * + * // The default-alignment equivalent would be + * uint16_t aligned_int = 42; + * uint8_t aligned_array[128]; + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ + +/** + * @def DECLARE_ASM_CONST(n,t,v) + * Declare a static constant aligned variable appropriate for use in inline + * assembly code. + * + * @code{.c} + * DECLARE_ASM_CONST(16, uint64_t, pw_08) = UINT64_C(0x0008000800080008); + * @endcode + * + * @param n Minimum alignment in bytes + * @param t Type of the variable (or array element) + * @param v Name of the variable + */ #if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v @@ -49,7 +97,10 @@ #define DECLARE_ASM_CONST(n,t,v) \ AV_PRAGMA(DATA_ALIGN(v,n)) \ static const t __attribute__((aligned(n))) v -#elif defined(__GNUC__) +#elif defined(__DJGPP__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (FFMIN(n, 16)))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (FFMIN(n, 16)))) v +#elif defined(__GNUC__) || defined(__clang__) #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v #elif defined(_MSC_VER) @@ -60,12 +111,47 @@ #define DECLARE_ASM_CONST(n,t,v) static const t v #endif +/** + * @} + */ + +/** + * @defgroup lavu_mem_attrs Function Attributes + * Function attributes applicable to memory handling functions. + * + * These function attributes can help compilers emit more useful warnings, or + * generate better code. 
+ * @{ + */ + +/** + * @def av_malloc_attrib + * Function attribute denoting a malloc-like function. + * + * @see Function attribute `malloc` in GCC's documentation + */ + #if AV_GCC_VERSION_AT_LEAST(3,1) #define av_malloc_attrib __attribute__((__malloc__)) #else #define av_malloc_attrib #endif +/** + * @def av_alloc_size(...) + * Function attribute used on a function that allocates memory, whose size is + * given by the specified parameter(s). + * + * @code{.c} + * void *av_malloc(size_t size) av_alloc_size(1); + * void *av_calloc(size_t nmemb, size_t size) av_alloc_size(1, 2); + * @endcode + * + * @param ... One or two parameter indexes, separated by a comma + * + * @see Function attribute `alloc_size` in GCC's documentation + */ + #if AV_GCC_VERSION_AT_LEAST(4,3) #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) #else @@ -73,21 +159,51 @@ #endif /** - * Allocate a block of size bytes with alignment suitable for all - * memory accesses (including vectors if available on the CPU). - * @param size Size in bytes for the memory block to be allocated. - * @return Pointer to the allocated block, NULL if the block cannot - * be allocated. + * @} + */ + +/** + * @defgroup lavu_mem_funcs Heap Management + * Functions responsible for allocating, freeing, and copying memory. + * + * All memory allocation functions have a built-in upper limit of `INT_MAX` + * bytes. This may be changed with av_max_alloc(), although exercise extreme + * caution when doing so. + * + * @{ + */ + +/** + * Allocate a memory block with alignment suitable for all memory accesses + * (including vectors if available on the CPU). + * + * @param size Size in bytes for the memory block to be allocated + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated * @see av_mallocz() */ void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); /** - * Allocate a block of size * nmemb bytes with av_malloc(). 
- * @param nmemb Number of elements
- * @param size Size of the single element
- * @return Pointer to the allocated block, NULL if the block cannot
- * be allocated.
+ * Allocate a memory block with alignment suitable for all memory accesses
+ * (including vectors if available on the CPU) and zero all the bytes of the
+ * block.
+ *
+ * @param size Size in bytes for the memory block to be allocated
+ * @return Pointer to the allocated block, or `NULL` if it cannot be allocated
+ * @see av_malloc()
+ */
+void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1);
+
+/**
+ * Allocate a memory block for an array with av_malloc().
+ *
+ * The allocated memory will have size `size * nmemb` bytes.
+ *
+ * @param nmemb Number of elements
+ * @param size Size of a single element
+ * @return Pointer to the allocated block, or `NULL` if the block cannot
+ * be allocated
 * @see av_malloc()
 */
av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size)
@@ -98,131 +214,15 @@ av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t siz
}
- * @see av_fast_realloc() - */ -void *av_realloc(void *ptr, size_t size) av_alloc_size(2); - -/** - * Allocate or reallocate a block of memory. - * This function does the same thing as av_realloc, except: - * - It takes two arguments and checks the result of the multiplication for - * integer overflow. - * - It frees the input block in case of failure, thus avoiding the memory - * leak with the classic "buf = realloc(buf); if (!buf) return -1;". - */ -void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); - -/** - * Allocate or reallocate a block of memory. - * If *ptr is NULL and size > 0, allocate a new block. If - * size is zero, free the memory block pointed to by ptr. - * @param ptr Pointer to a pointer to a memory block already allocated - * with av_realloc(), or pointer to a pointer to NULL. - * The pointer is updated on success, or freed on failure. - * @param size Size in bytes for the memory block to be allocated or - * reallocated - * @return Zero on success, an AVERROR error code on failure. - * @warning Pointers originating from the av_malloc() family of functions must - * not be passed to av_reallocp(). The former can be implemented using - * memalign() (or other functions), and there is no guarantee that - * pointers from such functions can be passed to realloc() at all. - * The situation is undefined according to POSIX and may crash with - * some libc implementations. - */ -av_warn_unused_result -int av_reallocp(void *ptr, size_t size); - -/** - * Allocate or reallocate an array. - * If ptr is NULL and nmemb > 0, allocate a new block. If - * nmemb is zero, free the memory block pointed to by ptr. - * @param ptr Pointer to a memory block already allocated with - * av_realloc() or NULL. + * Allocate a memory block for an array with av_mallocz(). + * + * The allocated memory will have size `size * nmemb` bytes. 
+ * * @param nmemb Number of elements - * @param size Size of the single element - * @return Pointer to a newly-reallocated block or NULL if the block - * cannot be reallocated or the function is used to free the memory block. - * @warning Pointers originating from the av_malloc() family of functions must - * not be passed to av_realloc(). The former can be implemented using - * memalign() (or other functions), and there is no guarantee that - * pointers from such functions can be passed to realloc() at all. - * The situation is undefined according to POSIX and may crash with - * some libc implementations. - */ -av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); - -/** - * Allocate or reallocate an array through a pointer to a pointer. - * If *ptr is NULL and nmemb > 0, allocate a new block. If - * nmemb is zero, free the memory block pointed to by ptr. - * @param ptr Pointer to a pointer to a memory block already allocated - * with av_realloc(), or pointer to a pointer to NULL. - * The pointer is updated on success, or freed on failure. - * @param nmemb Number of elements - * @param size Size of the single element - * @return Zero on success, an AVERROR error code on failure. - * @warning Pointers originating from the av_malloc() family of functions must - * not be passed to av_realloc(). The former can be implemented using - * memalign() (or other functions), and there is no guarantee that - * pointers from such functions can be passed to realloc() at all. - * The situation is undefined according to POSIX and may crash with - * some libc implementations. - */ -av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); - -/** - * Free a memory block which has been allocated with av_malloc(z)() or - * av_realloc(). - * @param ptr Pointer to the memory block which should be freed. - * @note ptr = NULL is explicitly allowed. - * @note It is recommended that you use av_freep() instead. 
- * @see av_freep() - */ -void av_free(void *ptr); - -/** - * Allocate a block of size bytes with alignment suitable for all - * memory accesses (including vectors if available on the CPU) and - * zero all the bytes of the block. - * @param size Size in bytes for the memory block to be allocated. - * @return Pointer to the allocated block, NULL if it cannot be allocated. - * @see av_malloc() - */ -void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); - -/** - * Allocate a block of nmemb * size bytes with alignment suitable for all - * memory accesses (including vectors if available on the CPU) and - * zero all the bytes of the block. - * The allocation will fail if nmemb * size is greater than or equal - * to INT_MAX. - * @param nmemb - * @param size - * @return Pointer to the allocated block, NULL if it cannot be allocated. - */ -void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; - -/** - * Allocate a block of size * nmemb bytes with av_mallocz(). - * @param nmemb Number of elements - * @param size Size of the single element - * @return Pointer to the allocated block, NULL if the block cannot - * be allocated. + * @param size Size of the single element + * @return Pointer to the allocated block, or `NULL` if the block cannot + * be allocated + * * @see av_mallocz() * @see av_malloc_array() */ @@ -234,43 +234,358 @@ av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t si } /** - * Duplicate the string s. - * @param s string to be duplicated - * @return Pointer to a newly-allocated string containing a - * copy of s or NULL if the string cannot be allocated. + * Non-inlined equivalent of av_mallocz_array(). + * + * Created for symmetry with the calloc() C function. */ -char *av_strdup(const char *s) av_malloc_attrib; +void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; /** - * Duplicate a substring of the string s. 
- * @param s string to be duplicated - * @param len the maximum length of the resulting string (not counting the - * terminating byte). - * @return Pointer to a newly-allocated string containing a - * copy of s or NULL if the string cannot be allocated. + * Allocate, reallocate, or free a block of memory. + * + * If `ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or `NULL` + * @param size Size in bytes of the memory block to be allocated or + * reallocated + * + * @return Pointer to a newly-reallocated block or `NULL` if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike av_malloc(), the returned pointer is not guaranteed to be + * correctly aligned. + * @see av_fast_realloc() + * @see av_reallocp() */ -char *av_strndup(const char *s, size_t len) av_malloc_attrib; +void *av_realloc(void *ptr, size_t size) av_alloc_size(2); /** - * Duplicate the buffer p. - * @param p buffer to be duplicated - * @return Pointer to a newly allocated buffer containing a - * copy of p or NULL if the buffer cannot be allocated. + * Allocate, reallocate, or free a block of memory through a pointer to a + * pointer. + * + * If `*ptr` is `NULL` and `size` > 0, allocate a new block. If `size` is + * zero, free the memory block pointed to by `*ptr`. Otherwise, expand or + * shrink that block of memory according to `size`. + * + * @param[in,out] ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or a pointer to `NULL`. The pointer + * is updated on success, or freed on failure. 
+ * @param[in] size Size in bytes for the memory block to be allocated or + * reallocated + * + * @return Zero on success, an AVERROR error code on failure + * + * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. */ -void *av_memdup(const void *p, size_t size); +av_warn_unused_result +int av_reallocp(void *ptr, size_t size); /** - * Free a memory block which has been allocated with av_malloc(z)() or - * av_realloc() and set the pointer pointing to it to NULL. - * @param ptr Pointer to the pointer to the memory block which should - * be freed. - * @note passing a pointer to a NULL pointer is safe and leads to no action. + * Allocate, reallocate, or free a block of memory. + * + * This function does the same thing as av_realloc(), except: + * - It takes two size arguments and allocates `nelem * elsize` bytes, + * after checking the result of the multiplication for integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic + * @code{.c} + * buf = realloc(buf); + * if (!buf) + * return -1; + * @endcode + * pattern. + */ +void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate, reallocate, or free an array. + * + * If `ptr` is `NULL` and `nmemb` > 0, allocate a new block. If + * `nmemb` is zero, free the memory block pointed to by `ptr`. + * + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or `NULL` + * @param nmemb Number of elements in the array + * @param size Size of the single element of the array + * + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block + * + * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be + * correctly aligned. 
+ * @see av_reallocp_array()
+ */
+av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Allocate, reallocate, or free an array through a pointer to a pointer.
+ *
+ * If `*ptr` is `NULL` and `nmemb` > 0, allocate a new block. If `nmemb` is
+ * zero, free the memory block pointed to by `*ptr`.
+ *
+ * @param[in,out] ptr Pointer to a pointer to a memory block already
+ * allocated with av_realloc(), or a pointer to `NULL`.
+ * The pointer is updated on success, or freed on failure.
+ * @param[in] nmemb Number of elements
+ * @param[in] size Size of the single element
+ *
+ * @return Zero on success, an AVERROR error code on failure
+ *
+ * @warning Unlike av_malloc(), the allocated memory is not guaranteed to be
+ * correctly aligned.
+ */
+av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size);
+
+/**
+ * Reallocate the given buffer if it is not large enough, otherwise do nothing.
+ *
+ * If the given buffer is `NULL`, then a new uninitialized buffer is allocated.
+ *
+ * If the given buffer is not large enough, and reallocation fails, `NULL` is
+ * returned and `*size` is set to 0, but the original buffer is not changed or
+ * freed.
+ *
+ * A typical use pattern follows:
+ *
+ * @code{.c}
+ * uint8_t *buf = ...;
+ * uint8_t *new_buf = av_fast_realloc(buf, &current_size, size_needed);
+ * if (!new_buf) {
+ * // Allocation failed; clean up original buffer
+ * av_freep(&buf);
+ * return AVERROR(ENOMEM);
+ * }
+ * @endcode
+ *
+ * @param[in,out] ptr Already allocated buffer, or `NULL`
+ * @param[in,out] size Pointer to current size of buffer `ptr`.
`*size` is
+ * changed to `min_size` in case of success or 0 in
+ * case of failure
+ * @param[in] min_size New size of buffer `ptr`
+ * @return `ptr` if the buffer is large enough, a pointer to newly reallocated
+ * buffer if the buffer was not large enough, or `NULL` in case of
+ * error
+ * @see av_realloc()
+ * @see av_fast_malloc()
+ */
+void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate a buffer, reusing the given one if large enough.
+ *
+ * Contrary to av_fast_realloc(), the current buffer contents might not be
+ * preserved and on error the old buffer is freed, thus no special handling to
+ * avoid memleaks is necessary.
+ *
+ * `*ptr` is allowed to be `NULL`, in which case allocation always happens if
+ * `size_needed` is greater than 0.
+ *
+ * @code{.c}
+ * uint8_t *buf = ...;
+ * av_fast_malloc(&buf, &current_size, size_needed);
+ * if (!buf) {
+ * // Allocation failed; buf already freed
+ * return AVERROR(ENOMEM);
+ * }
+ * @endcode
+ *
+ * @param[in,out] ptr Pointer to pointer to an already allocated buffer.
+ * `*ptr` will be overwritten with pointer to new
+ * buffer on success or `NULL` on failure
+ * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is
+ * changed to `min_size` in case of success or 0 in
+ * case of failure
+ * @param[in] min_size New size of buffer `*ptr`
+ * @see av_realloc()
+ * @see av_fast_mallocz()
+ */
+void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size);
+
+/**
+ * Allocate and clear a buffer, reusing the given one if large enough.
+ *
+ * Like av_fast_malloc(), but all newly allocated space is initially cleared.
+ * Reused buffer is not cleared.
+ *
+ * `*ptr` is allowed to be `NULL`, in which case allocation always happens if
+ * `size_needed` is greater than 0.
+ *
+ * @param[in,out] ptr Pointer to pointer to an already allocated buffer.
+ * `*ptr` will be overwritten with pointer to new + * buffer on success or `NULL` on failure + * @param[in,out] size Pointer to current size of buffer `*ptr`. `*size` is + * changed to `min_size` in case of success or 0 in + * case of failure + * @param[in] min_size New size of buffer `*ptr` + * @see av_fast_malloc() + */ +void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Free a memory block which has been allocated with a function of av_malloc() + * or av_realloc() family. + * + * @param ptr Pointer to the memory block which should be freed. + * + * @note `ptr = NULL` is explicitly allowed. + * @note It is recommended that you use av_freep() instead, to prevent leaving + * behind dangling pointers. + * @see av_freep() + */ +void av_free(void *ptr); + +/** + * Free a memory block which has been allocated with a function of av_malloc() + * or av_realloc() family, and set the pointer pointing to it to `NULL`. + * + * @code{.c} + * uint8_t *buf = av_malloc(16); + * av_free(buf); + * // buf now contains a dangling pointer to freed memory, and accidental + * // dereference of buf will result in a use-after-free, which may be a + * // security risk. + * + * uint8_t *buf = av_malloc(16); + * av_freep(&buf); + * // buf is now NULL, and accidental dereference will only result in a + * // NULL-pointer dereference. + * @endcode + * + * @param ptr Pointer to the pointer to the memory block which should be freed + * @note `*ptr = NULL` is safe and leads to no action. * @see av_free() */ void av_freep(void *ptr); /** - * Add an element to a dynamic array. + * Duplicate a string. + * + * @param s String to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of `s` or `NULL` if the string cannot be allocated + * @see av_strndup() + */ +char *av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate a substring of a string. 
+ * + * @param s String to be duplicated + * @param len Maximum length of the resulting string (not counting the + * terminating byte) + * @return Pointer to a newly-allocated string containing a + * substring of `s` or `NULL` if the string cannot be allocated + */ +char *av_strndup(const char *s, size_t len) av_malloc_attrib; + +/** + * Duplicate a buffer with av_malloc(). + * + * @param p Buffer to be duplicated + * @param size Size in bytes of the buffer copied + * @return Pointer to a newly allocated buffer containing a + * copy of `p` or `NULL` if the buffer cannot be allocated + */ +void *av_memdup(const void *p, size_t size); + +/** + * Overlapping memcpy() implementation. + * + * @param dst Destination buffer + * @param back Number of bytes back to start copying (i.e. the initial size of + * the overlapping window); must be > 0 + * @param cnt Number of bytes to copy; must be >= 0 + * + * @note `cnt > back` is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of `back`. + */ +void av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * @} + */ + +/** + * @defgroup lavu_mem_dynarray Dynamic Array + * + * Utilities to make an array grow when needed. + * + * Sometimes, the programmer would want to have an array that can grow when + * needed. The libavutil dynamic array utilities fill that need. + * + * libavutil supports two systems of appending elements onto a dynamically + * allocated array, the first one storing the pointer to the value in the + * array, and the second storing the value directly. In both systems, the + * caller is responsible for maintaining a variable containing the length of + * the array, as well as freeing of the array after use. + * + * The first system stores pointers to values in a block of dynamically + * allocated memory. Since only pointers are stored, the function does not need + * to know the size of the type. 
Both av_dynarray_add() and + * av_dynarray_add_nofree() implement this system. + * + * @code + * type **array = NULL; //< an array of pointers to values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * av_dynarray_add(&array, &nb, &to_be_added); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * av_dynarray_add(&array, &nb, &to_be_added2); + * if (nb == 0) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // &to_be_added == array[0] + * // &to_be_added2 == array[1] + * + * av_freep(&array); + * @endcode + * + * The second system stores the value directly in a block of memory. As a + * result, the function has to know the size of the type. av_dynarray2_add() + * implements this mechanism. + * + * @code + * type *array = NULL; //< an array of values + * int nb = 0; //< a variable to keep track of the length of the array + * + * type to_be_added = ...; + * type to_be_added2 = ...; + * + * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array), NULL); + * if (!addr) + * return AVERROR(ENOMEM); + * memcpy(addr, &to_be_added, sizeof(to_be_added)); + * + * // Shortcut of the above. + * type *addr = av_dynarray2_add((void **)&array, &nb, sizeof(*array), + * (const void *)&to_be_added2); + * if (!addr) + * return AVERROR(ENOMEM); + * + * // Now: + * // nb == 2 + * // to_be_added == array[0] + * // to_be_added2 == array[1] + * + * av_freep(&array); + * @endcode + * + * @{ + */ + +/** + * Add the pointer to an element to a dynamic array. * * The array to grow is supposed to be an array of pointers to * structures, and the element to add must be a pointer to an already @@ -280,14 +595,14 @@ void av_freep(void *ptr); * Therefore, the amortized cost of adding an element is constant. 
* * In case of success, the pointer to the array is updated in order to - * point to the new grown array, and the number pointed to by nb_ptr + * point to the new grown array, and the number pointed to by `nb_ptr` * is incremented. - * In case of failure, the array is freed, *tab_ptr is set to NULL and - * *nb_ptr is set to 0. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. * - * @param tab_ptr pointer to the array to grow - * @param nb_ptr pointer to the number of elements in the array - * @param elem element to add + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem Element to add * @see av_dynarray_add_nofree(), av_dynarray2_add() */ void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); @@ -299,48 +614,62 @@ void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); * but it doesn't free memory on fails. It returns error code * instead and leave current buffer untouched. * - * @param tab_ptr pointer to the array to grow - * @param nb_ptr pointer to the number of elements in the array - * @param elem element to add - * @return >=0 on success, negative otherwise. + * @return >=0 on success, negative otherwise * @see av_dynarray_add(), av_dynarray2_add() */ av_warn_unused_result int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem); /** - * Add an element of size elem_size to a dynamic array. + * Add an element of size `elem_size` to a dynamic array. * * The array is reallocated when its number of elements reaches powers of 2. * Therefore, the amortized cost of adding an element is constant. * * In case of success, the pointer to the array is updated in order to - * point to the new grown array, and the number pointed to by nb_ptr + * point to the new grown array, and the number pointed to by `nb_ptr` * is incremented. 
- * In case of failure, the array is freed, *tab_ptr is set to NULL and - * *nb_ptr is set to 0. + * In case of failure, the array is freed, `*tab_ptr` is set to `NULL` and + * `*nb_ptr` is set to 0. * - * @param tab_ptr pointer to the array to grow - * @param nb_ptr pointer to the number of elements in the array - * @param elem_size size in bytes of the elements in the array - * @param elem_data pointer to the data of the element to add. If NULL, the space of - * the new added element is not filled. - * @return pointer to the data of the element to copy in the new allocated space. - * If NULL, the new allocated space is left uninitialized." + * @param[in,out] tab_ptr Pointer to the array to grow + * @param[in,out] nb_ptr Pointer to the number of elements in the array + * @param[in] elem_size Size in bytes of an element in the array + * @param[in] elem_data Pointer to the data of the element to add. If + * `NULL`, the space of the newly added element is + * allocated but left uninitialized. + * + * @return Pointer to the data of the element to copy in the newly allocated + * space * @see av_dynarray_add(), av_dynarray_add_nofree() */ void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, const uint8_t *elem_data); /** - * Multiply two size_t values checking for overflow. - * @return 0 if success, AVERROR(EINVAL) if overflow. + * @} + */ + +/** + * @defgroup lavu_mem_misc Miscellaneous Functions + * + * Other functions related to memory allocation. + * + * @{ + */ + +/** + * Multiply two `size_t` values checking for overflow. + * + * @param[in] a,b Operands of multiplication + * @param[out] r Pointer to the result of the operation + * @return 0 on success, AVERROR(EINVAL) on overflow */ static inline int av_size_mult(size_t a, size_t b, size_t *r) { size_t t = a * b; - /* Hack inspired from glibc: only try the division if nelem and elsize - * are both greater than sqrt(SIZE_MAX). 
*/ + /* Hack inspired from glibc: don't try the division if nelem and elsize + * are both less than sqrt(SIZE_MAX). */ if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) return AVERROR(EINVAL); *r = t; @@ -348,58 +677,22 @@ static inline int av_size_mult(size_t a, size_t b, size_t *r) } /** - * Set the maximum size that may me allocated in one block. + * Set the maximum size that may be allocated in one block. + * + * The value specified with this function is effective for all libavutil's @ref + * lavu_mem_funcs "heap management functions." + * + * By default, the max value is defined as `INT_MAX`. + * + * @param max Value to be set as the new maximum size + * + * @warning Exercise extreme caution when using this function. Don't touch + * this if you do not understand the full consequence of doing so. */ void av_max_alloc(size_t max); /** - * deliberately overlapping memcpy implementation - * @param dst destination buffer - * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 - * @param cnt number of bytes to copy, must be >= 0 - * - * cnt > back is valid, this will copy the bytes we just copied, - * thus creating a repeating pattern with a period length of back. - */ -void av_memcpy_backptr(uint8_t *dst, int back, int cnt); - -/** - * Reallocate the given block if it is not large enough, otherwise do nothing. - * - * @see av_realloc - */ -void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); - -/** - * Allocate a buffer, reusing the given one if large enough. - * - * Contrary to av_fast_realloc the current buffer contents might not be - * preserved and on error the old buffer is freed, thus no special - * handling to avoid memleaks is necessary. 
- * - * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer - * @param size size of the buffer *ptr points to - * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and - * *size 0 if an error occurred. - */ -void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); - -/** - * Allocate a buffer, reusing the given one if large enough. - * - * All newly allocated space is initially cleared - * Contrary to av_fast_realloc the current buffer contents might not be - * preserved and on error the old buffer is freed, thus no special - * handling to avoid memleaks is necessary. - * - * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer - * @param size size of the buffer *ptr points to - * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and - * *size 0 if an error occurred. - */ -void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); - -/** + * @} * @} */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/murmur3.h b/third-party/FFmpeg-iOS/include/libavutil/murmur3.h index f29ed973e9..6a1694c08d 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/murmur3.h +++ b/third-party/FFmpeg-iOS/include/libavutil/murmur3.h @@ -18,15 +18,97 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_murmur3 + * Public header for MurmurHash3 hash function implementation. + */ + #ifndef AVUTIL_MURMUR3_H #define AVUTIL_MURMUR3_H #include +/** + * @defgroup lavu_murmur3 Murmur3 + * @ingroup lavu_hash + * MurmurHash3 hash function implementation. 
+ * + * MurmurHash3 is a non-cryptographic hash function, of which three + * incompatible versions were created by its inventor Austin Appleby: + * + * - 32-bit output + * - 128-bit output for 32-bit platforms + * - 128-bit output for 64-bit platforms + * + * FFmpeg only implements the last variant: 128-bit output designed for 64-bit + * platforms. Even though the hash function was designed for 64-bit platforms, + * the function in reality works on 32-bit systems too, only with reduced + * performance. + * + * @anchor lavu_murmur3_seedinfo + * By design, MurmurHash3 requires a seed to operate. In response to this, + * libavutil provides two functions for hash initiation, one that requires a + * seed (av_murmur3_init_seeded()) and one that uses a fixed arbitrary integer + * as the seed, and therefore does not (av_murmur3_init()). + * + * To make hashes comparable, you should provide the same seed for all calls to + * this hash function -- if you are supplying one yourself, that is. + * + * @{ + */ + +/** + * Allocate an AVMurMur3 hash context. + * + * @return Uninitialized hash context or `NULL` in case of error + */ struct AVMurMur3 *av_murmur3_alloc(void); + +/** + * Initialize or reinitialize an AVMurMur3 hash context with a seed. + * + * @param[out] c Hash context + * @param[in] seed Random seed + * + * @see av_murmur3_init() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); + +/** + * Initialize or reinitialize an AVMurMur3 hash context. + * + * Equivalent to av_murmur3_init_seeded() with a built-in seed. + * + * @param[out] c Hash context + * + * @see av_murmur3_init_seeded() + * @see @ref lavu_murmur3_seedinfo "Detailed description" on a discussion of + * seeds for MurmurHash3. + */ void av_murmur3_init(struct AVMurMur3 *c); + +/** + * Update hash context with new data. 
+ * + * @param[out] c Hash context + * @param[in] src Input data to update hash with + * @param[in] len Number of bytes to read from `src` + */ void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); + +/** + * Finish hashing and output digest value. + * + * @param[in,out] c Hash context + * @param[out] dst Buffer where output digest value is stored + */ void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); +/** + * @} + */ + #endif /* AVUTIL_MURMUR3_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/opt.h b/third-party/FFmpeg-iOS/include/libavutil/opt.h index 9a76a47f75..0d893795de 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/opt.h +++ b/third-party/FFmpeg-iOS/include/libavutil/opt.h @@ -58,7 +58,7 @@ * The following example illustrates an AVOptions-enabled struct: * @code * typedef struct test_struct { - * AVClass *class; + * const AVClass *class; * int int_opt; * char *str_opt; * uint8_t *bin_opt; @@ -96,7 +96,7 @@ * @code * test_struct *alloc_test_struct(void) * { - * test_struct *ret = av_malloc(sizeof(*ret)); + * test_struct *ret = av_mallocz(sizeof(*ret)); * ret->class = &test_class; * av_opt_set_defaults(ret); * return ret; @@ -228,6 +228,7 @@ enum AVOptionType{ AV_OPT_TYPE_RATIONAL, AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length AV_OPT_TYPE_DICT, + AV_OPT_TYPE_UINT64, AV_OPT_TYPE_CONST = 128, AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), diff --git a/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h b/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h index 3b0bcdb3d8..fc3737c4ad 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h +++ b/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h @@ -173,76 +173,15 @@ typedef struct AVPixFmtDescriptor { #define AV_PIX_FMT_FLAG_ALPHA (1 << 7) /** - * Read a line from an image, and write the values of the 
- * pixel format component c to dst. - * - * @param data the array containing the pointers to the planes of the image - * @param linesize the array containing the linesizes of the image - * @param desc the pixel format descriptor for the image - * @param x the horizontal coordinate of the first pixel to read - * @param y the vertical coordinate of the first pixel to read - * @param w the width of the line to read, that is the number of - * values to write to dst - * @param read_pal_component if not zero and the format is a paletted - * format writes the values corresponding to the palette - * component c in data[1] to dst, rather than the palette indexes in - * data[0]. The behavior is undefined if the format is not paletted. + * The pixel format is following a Bayer pattern */ -void av_read_image_line(uint16_t *dst, const uint8_t *data[4], - const int linesize[4], const AVPixFmtDescriptor *desc, - int x, int y, int c, int w, int read_pal_component); +#define AV_PIX_FMT_FLAG_BAYER (1 << 8) /** - * Write the values from src to the pixel format component c of an - * image line. - * - * @param src array containing the values to write - * @param data the array containing the pointers to the planes of the - * image to write into. It is supposed to be zeroed. - * @param linesize the array containing the linesizes of the image - * @param desc the pixel format descriptor for the image - * @param x the horizontal coordinate of the first pixel to write - * @param y the vertical coordinate of the first pixel to write - * @param w the width of the line to write, that is the number of - * values to write to the image line + * The pixel format contains IEEE-754 floating point values. Precision (double, + * single, or half) should be determined by the pixel size (64, 32, or 16 bits). 
*/ -void av_write_image_line(const uint16_t *src, uint8_t *data[4], - const int linesize[4], const AVPixFmtDescriptor *desc, - int x, int y, int c, int w); - -/** - * Return the pixel format corresponding to name. - * - * If there is no pixel format with name name, then looks for a - * pixel format with the name corresponding to the native endian - * format of name. - * For example in a little-endian system, first looks for "gray16", - * then for "gray16le". - * - * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. - */ -enum AVPixelFormat av_get_pix_fmt(const char *name); - -/** - * Return the short name for a pixel format, NULL in case pix_fmt is - * unknown. - * - * @see av_get_pix_fmt(), av_get_pix_fmt_string() - */ -const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); - -/** - * Print in buf the string corresponding to the pixel format with - * number pix_fmt, or a header if pix_fmt is negative. - * - * @param buf the buffer where to write the string - * @param buf_size the size of buf - * @param pix_fmt the number of the pixel format to print the - * corresponding info string, or a negative value to print the - * corresponding header. - */ -char *av_get_pix_fmt_string(char *buf, int buf_size, - enum AVPixelFormat pix_fmt); +#define AV_PIX_FMT_FLAG_FLOAT (1 << 9) /** * Return the number of bits per pixel used by the pixel format @@ -306,6 +245,128 @@ int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, */ int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); +/** + * @return the name for provided color range or NULL if unknown. + */ +const char *av_color_range_name(enum AVColorRange range); + +/** + * @return the AVColorRange value for name or an AVError if not found. + */ +int av_color_range_from_name(const char *name); + +/** + * @return the name for provided color primaries or NULL if unknown. 
+ */ +const char *av_color_primaries_name(enum AVColorPrimaries primaries); + +/** + * @return the AVColorPrimaries value for name or an AVError if not found. + */ +int av_color_primaries_from_name(const char *name); + +/** + * @return the name for provided color transfer or NULL if unknown. + */ +const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer); + +/** + * @return the AVColorTransferCharacteristic value for name or an AVError if not found. + */ +int av_color_transfer_from_name(const char *name); + +/** + * @return the name for provided color space or NULL if unknown. + */ +const char *av_color_space_name(enum AVColorSpace space); + +/** + * @return the AVColorSpace value for name or an AVError if not found. + */ +int av_color_space_from_name(const char *name); + +/** + * @return the name for provided chroma location or NULL if unknown. + */ +const char *av_chroma_location_name(enum AVChromaLocation location); + +/** + * @return the AVChromaLocation value for name or an AVError if not found. + */ +int av_chroma_location_from_name(const char *name); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see av_get_pix_fmt(), av_get_pix_fmt_string() + */ +const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. 
+ * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *av_get_pix_fmt_string(char *buf, int buf_size, + enum AVPixelFormat pix_fmt); + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. + * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. The behavior is undefined if the format is not paletted. + */ +void av_read_image_line(uint16_t *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. 
+ * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + */ +void av_write_image_line(const uint16_t *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w); + /** * Utility function to swap the endianness of a pixel format. * @@ -366,29 +427,4 @@ int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); -/** - * @return the name for provided color range or NULL if unknown. - */ -const char *av_color_range_name(enum AVColorRange range); - -/** - * @return the name for provided color primaries or NULL if unknown. - */ -const char *av_color_primaries_name(enum AVColorPrimaries primaries); - -/** - * @return the name for provided color transfer or NULL if unknown. - */ -const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer); - -/** - * @return the name for provided color space or NULL if unknown. - */ -const char *av_color_space_name(enum AVColorSpace space); - -/** - * @return the name for provided chroma location or NULL if unknown. 
- */ -const char *av_chroma_location_name(enum AVChromaLocation location); - #endif /* AVUTIL_PIXDESC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/pixfmt.h b/third-party/FFmpeg-iOS/include/libavutil/pixfmt.h index afe56c2e30..02591fcad3 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/pixfmt.h +++ b/third-party/FFmpeg-iOS/include/libavutil/pixfmt.h @@ -77,7 +77,7 @@ enum AVPixelFormat { #if FF_API_XVMC AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing AV_PIX_FMT_XVMC_MPEG2_IDCT, -#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT + AV_PIX_FMT_XVMC = AV_PIX_FMT_XVMC_MPEG2_IDCT, #endif /* FF_API_XVMC */ AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 @@ -178,6 +178,7 @@ enum AVPixelFormat { AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBR24P = AV_PIX_FMT_GBRP, // alias for #AV_PIX_FMT_GBRP AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian @@ -239,7 +240,7 @@ enum AVPixelFormat { */ AV_PIX_FMT_MMAL, - AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11 via old API, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer /** * HW acceleration through CUDA. 
data[i] contain CUdeviceptr pointers @@ -303,11 +304,45 @@ enum AVPixelFormat { AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian - AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions -}; + AV_PIX_FMT_MEDIACODEC, ///< hardware decoding through MediaCodec -#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A -#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP + AV_PIX_FMT_GRAY12BE, ///< Y , 12bpp, big-endian + AV_PIX_FMT_GRAY12LE, ///< Y , 12bpp, little-endian + AV_PIX_FMT_GRAY10BE, ///< Y , 10bpp, big-endian + AV_PIX_FMT_GRAY10LE, ///< Y , 10bpp, little-endian + + AV_PIX_FMT_P016LE, ///< like NV12, with 16bpp per component, little-endian + AV_PIX_FMT_P016BE, ///< like NV12, with 16bpp per component, big-endian + + /** + * Hardware surfaces for Direct3D11. + * + * This is preferred over the legacy AV_PIX_FMT_D3D11VA_VLD. The new D3D11 + * hwaccel API and filtering support AV_PIX_FMT_D3D11 only. + * + * data[0] contains a ID3D11Texture2D pointer, and data[1] contains the + * texture array index of the frame as intptr_t if the ID3D11Texture2D is + * an array texture (or always 0 if it's a normal texture). + */ + AV_PIX_FMT_D3D11, + + AV_PIX_FMT_GRAY9BE, ///< Y , 9bpp, big-endian + AV_PIX_FMT_GRAY9LE, ///< Y , 9bpp, little-endian + + AV_PIX_FMT_GBRPF32BE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, big-endian + AV_PIX_FMT_GBRPF32LE, ///< IEEE-754 single precision planar GBR 4:4:4, 96bpp, little-endian + AV_PIX_FMT_GBRAPF32BE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, big-endian + AV_PIX_FMT_GBRAPF32LE, ///< IEEE-754 single precision planar GBRA 4:4:4:4, 128bpp, little-endian + + /** + * DRM-managed buffers exposed through PRIME buffer sharing. + * + * data[0] points to an AVDRMFrameDescriptor. 
+ */ + AV_PIX_FMT_DRM_PRIME, + + AV_PIX_FMT_NB ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +}; #if AV_HAVE_BIGENDIAN # define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be @@ -322,6 +357,9 @@ enum AVPixelFormat { #define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) #define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) +#define AV_PIX_FMT_GRAY9 AV_PIX_FMT_NE(GRAY9BE, GRAY9LE) +#define AV_PIX_FMT_GRAY10 AV_PIX_FMT_NE(GRAY10BE, GRAY10LE) +#define AV_PIX_FMT_GRAY12 AV_PIX_FMT_NE(GRAY12BE, GRAY12LE) #define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) #define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, YA16LE) #define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) @@ -367,6 +405,8 @@ enum AVPixelFormat { #define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) #define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) +#define AV_PIX_FMT_GBRPF32 AV_PIX_FMT_NE(GBRPF32BE, GBRPF32LE) +#define AV_PIX_FMT_GBRAPF32 AV_PIX_FMT_NE(GBRAPF32BE, GBRAPF32LE) #define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) #define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) @@ -382,9 +422,11 @@ enum AVPixelFormat { #define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) #define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE) #define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE) +#define AV_PIX_FMT_P016 AV_PIX_FMT_NE(P016BE, P016LE) /** * Chromaticity coordinates of the source primaries. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.1. 
*/ enum AVColorPrimaries { AVCOL_PRI_RESERVED0 = 0, @@ -398,12 +440,17 @@ enum AVColorPrimaries { AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 - AVCOL_PRI_SMPTEST428_1= 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) - AVCOL_PRI_NB, ///< Not part of ABI + AVCOL_PRI_SMPTE428 = 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_SMPTEST428_1 = AVCOL_PRI_SMPTE428, + AVCOL_PRI_SMPTE431 = 11, ///< SMPTE ST 431-2 (2011) / DCI P3 + AVCOL_PRI_SMPTE432 = 12, ///< SMPTE ST 432-1 (2010) / P3 D65 / Display P3 + AVCOL_PRI_JEDEC_P22 = 22, ///< JEDEC P22 phosphors + AVCOL_PRI_NB ///< Not part of ABI }; /** * Color Transfer Characteristic. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.2. */ enum AVColorTransferCharacteristic { AVCOL_TRC_RESERVED0 = 0, @@ -422,14 +469,17 @@ enum AVColorTransferCharacteristic { AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system - AVCOL_TRC_SMPTEST2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems - AVCOL_TRC_SMPTEST428_1 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTE2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST2084 = AVCOL_TRC_SMPTE2084, + AVCOL_TRC_SMPTE428 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_SMPTEST428_1 = AVCOL_TRC_SMPTE428, AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" - AVCOL_TRC_NB, ///< Not part of ABI + AVCOL_TRC_NB ///< Not part of ABI }; /** * YUV colorspace type. + * These values match the ones defined by ISO/IEC 23001-8_2013 § 7.3. 
*/ enum AVColorSpace { AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) @@ -440,13 +490,16 @@ enum AVColorSpace { AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above - AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCGCO = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_YCOCG = AVCOL_SPC_YCGCO, AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system - AVCOL_SPC_NB, ///< Not part of ABI + AVCOL_SPC_SMPTE2085 = 11, ///< SMPTE 2085, Y'D'zD'x + AVCOL_SPC_CHROMA_DERIVED_NCL = 12, ///< Chromaticity-derived non-constant luminance system + AVCOL_SPC_CHROMA_DERIVED_CL = 13, ///< Chromaticity-derived constant luminance system + AVCOL_SPC_ICTCP = 14, ///< ITU-R BT.2100-0, ICtCp + AVCOL_SPC_NB ///< Not part of ABI }; -#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG - /** * MPEG vs JPEG YUV range. 
@@ -455,7 +508,7 @@ enum AVColorRange { AVCOL_RANGE_UNSPECIFIED = 0, AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges - AVCOL_RANGE_NB, ///< Not part of ABI + AVCOL_RANGE_NB ///< Not part of ABI }; /** @@ -481,7 +534,7 @@ enum AVChromaLocation { AVCHROMA_LOC_TOP = 4, AVCHROMA_LOC_BOTTOMLEFT = 5, AVCHROMA_LOC_BOTTOM = 6, - AVCHROMA_LOC_NB, ///< Not part of ABI + AVCHROMA_LOC_NB ///< Not part of ABI }; #endif /* AVUTIL_PIXFMT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/rational.h b/third-party/FFmpeg-iOS/include/libavutil/rational.h index 2897469680..5c6b67b4e9 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/rational.h +++ b/third-party/FFmpeg-iOS/include/libavutil/rational.h @@ -21,7 +21,8 @@ /** * @file - * rational numbers + * @ingroup lavu_math_rational + * Utilties for rational number calculation. * @author Michael Niedermayer */ @@ -33,22 +34,39 @@ #include "attributes.h" /** - * @addtogroup lavu_math + * @defgroup lavu_math_rational AVRational + * @ingroup lavu_math + * Rational number calculation. + * + * While rational numbers can be expressed as floating-point numbers, the + * conversion process is a lossy one, so are floating-point operations. On the + * other hand, the nature of FFmpeg demands highly accurate calculation of + * timestamps. This set of rational number utilities serves as a generic + * interface for manipulating rational numbers as pairs of numerators and + * denominators. + * + * Many of the functions that operate on AVRational's have the suffix `_q`, in + * reference to the mathematical symbol "ℚ" (Q) which denotes the set of all + * rational numbers. + * * @{ */ /** - * rational number numerator/denominator + * Rational number (pair of numerator and denominator). 
*/ typedef struct AVRational{ - int num; ///< numerator - int den; ///< denominator + int num; ///< Numerator + int den; ///< Denominator } AVRational; /** - * Create a rational. + * Create an AVRational. + * * Useful for compilers that do not support compound literals. - * @note The return value is not reduced. + * + * @note The return value is not reduced. + * @see av_reduce() */ static inline AVRational av_make_q(int num, int den) { @@ -58,10 +76,15 @@ static inline AVRational av_make_q(int num, int den) /** * Compare two rationals. - * @param a first rational - * @param b second rational - * @return 0 if a==b, 1 if a>b, -1 if a b` + * - -1 if `a < b` + * - `INT_MIN` if one of the values is of the form `0 / 0` */ static inline int av_cmp_q(AVRational a, AVRational b){ const int64_t tmp= a.num * (int64_t)b.den - b.num * (int64_t)a.den; @@ -73,9 +96,10 @@ static inline int av_cmp_q(AVRational a, AVRational b){ } /** - * Convert rational to double. - * @param a rational to convert - * @return (double) a + * Convert an AVRational to a `double`. + * @param a AVRational to convert + * @return `a` in floating-point form + * @see av_d2q() */ static inline double av_q2d(AVRational a){ return a.num / (double) a.den; @@ -83,44 +107,46 @@ static inline double av_q2d(AVRational a){ /** * Reduce a fraction. + * * This is useful for framerate calculations. 
- * @param dst_num destination numerator - * @param dst_den destination denominator - * @param num source numerator - * @param den source denominator - * @param max the maximum allowed for dst_num & dst_den - * @return 1 if exact, 0 otherwise + * + * @param[out] dst_num Destination numerator + * @param[out] dst_den Destination denominator + * @param[in] num Source numerator + * @param[in] den Source denominator + * @param[in] max Maximum allowed values for `dst_num` & `dst_den` + * @return 1 if the operation is exact, 0 otherwise */ int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); /** * Multiply two rationals. - * @param b first rational - * @param c second rational + * @param b First rational + * @param c Second rational * @return b*c */ AVRational av_mul_q(AVRational b, AVRational c) av_const; /** * Divide one rational by another. - * @param b first rational - * @param c second rational + * @param b First rational + * @param c Second rational * @return b/c */ AVRational av_div_q(AVRational b, AVRational c) av_const; /** * Add two rationals. - * @param b first rational - * @param c second rational + * @param b First rational + * @param c Second rational * @return b+c */ AVRational av_add_q(AVRational b, AVRational c) av_const; /** * Subtract one rational from another. - * @param b first rational - * @param c second rational + * @param b First rational + * @param c Second rational * @return b-c */ AVRational av_sub_q(AVRational b, AVRational c) av_const; @@ -138,31 +164,46 @@ static av_always_inline AVRational av_inv_q(AVRational q) /** * Convert a double precision floating point number to a rational. - * inf is expressed as {1,0} or {-1,0} depending on the sign. * - * @param d double to convert - * @param max the maximum allowed numerator and denominator - * @return (AVRational) d + * In case of infinity, the returned value is expressed as `{1, 0}` or + * `{-1, 0}` depending on the sign. 
+ * + * @param d `double` to convert + * @param max Maximum allowed numerator and denominator + * @return `d` in AVRational form + * @see av_q2d() */ AVRational av_d2q(double d, int max) av_const; /** - * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer - * than q1, 0 if they have the same distance. + * Find which of the two rationals is closer to another rational. + * + * @param q Rational to be compared against + * @param q1,q2 Rationals to be tested + * @return One of the following values: + * - 1 if `q1` is nearer to `q` than `q2` + * - -1 if `q2` is nearer to `q` than `q1` + * - 0 if they have the same distance */ int av_nearer_q(AVRational q, AVRational q1, AVRational q2); /** - * Find the nearest value in q_list to q. - * @param q_list an array of rationals terminated by {0, 0} - * @return the index of the nearest value found in the array + * Find the value in a list of rationals nearest a given reference rational. + * + * @param q Reference rational + * @param q_list Array of rationals terminated by `{0, 0}` + * @return Index of the nearest value found in the array */ int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); /** - * Converts a AVRational to a IEEE 32bit float. + * Convert an AVRational to a IEEE 32-bit `float` expressed in fixed-point + * format. * - * The float is returned in a uint32_t and its value is platform indepenant. + * @param q Rational to be converted + * @return Equivalent floating-point value, expressed as an unsigned 32-bit + * integer. + * @note The returned value is platform-indepedant. 
*/ uint32_t av_q2intfloat(AVRational q); diff --git a/third-party/FFmpeg-iOS/include/libavutil/ripemd.h b/third-party/FFmpeg-iOS/include/libavutil/ripemd.h index 7b0c8bc89c..6d6bb3208f 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/ripemd.h +++ b/third-party/FFmpeg-iOS/include/libavutil/ripemd.h @@ -19,6 +19,12 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_ripemd + * Public header for RIPEMD hash function implementation. + */ + #ifndef AVUTIL_RIPEMD_H #define AVUTIL_RIPEMD_H @@ -29,7 +35,9 @@ /** * @defgroup lavu_ripemd RIPEMD - * @ingroup lavu_crypto + * @ingroup lavu_hash + * RIPEMD hash function implementation. + * * @{ */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h b/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h index 57da2784d2..8cd43ae856 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h +++ b/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h @@ -68,6 +68,8 @@ enum AVSampleFormat { AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar AV_SAMPLE_FMT_FLTP, ///< float, planar AV_SAMPLE_FMT_DBLP, ///< double, planar + AV_SAMPLE_FMT_S64, ///< signed 64 bits + AV_SAMPLE_FMT_S64P, ///< signed 64 bits, planar AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically }; diff --git a/third-party/FFmpeg-iOS/include/libavutil/sha.h b/third-party/FFmpeg-iOS/include/libavutil/sha.h index bf4377e51b..c0180e5729 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/sha.h +++ b/third-party/FFmpeg-iOS/include/libavutil/sha.h @@ -18,9 +18,16 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_sha + * Public header for SHA-1 & SHA-256 hash function implementations. 
+ */ + #ifndef AVUTIL_SHA_H #define AVUTIL_SHA_H +#include #include #include "attributes.h" @@ -28,7 +35,17 @@ /** * @defgroup lavu_sha SHA - * @ingroup lavu_crypto + * @ingroup lavu_hash + * SHA-1 and SHA-256 (Secure Hash Algorithm) hash function implementations. + * + * This module supports the following SHA hash functions: + * + * - SHA-1: 160 bits + * - SHA-224: 224 bits, as a variant of SHA-2 + * - SHA-256: 256 bits, as a variant of SHA-2 + * + * @see For SHA-384, SHA-512, and variants thereof, see @ref lavu_sha512. + * * @{ */ @@ -53,11 +70,15 @@ int av_sha_init(struct AVSHA* context, int bits); /** * Update hash value. * - * @param context hash function context + * @param ctx hash function context * @param data input data to update hash with * @param len input data length */ -void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); +#if FF_API_CRYPTO_SIZE_T +void av_sha_update(struct AVSHA *ctx, const uint8_t *data, unsigned int len); +#else +void av_sha_update(struct AVSHA *ctx, const uint8_t *data, size_t len); +#endif /** * Finish hashing and output digest value. diff --git a/third-party/FFmpeg-iOS/include/libavutil/sha512.h b/third-party/FFmpeg-iOS/include/libavutil/sha512.h index 7b08701477..bef714b41c 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/sha512.h +++ b/third-party/FFmpeg-iOS/include/libavutil/sha512.h @@ -19,17 +19,35 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu_sha512 + * Public header for SHA-512 implementation. + */ + #ifndef AVUTIL_SHA512_H #define AVUTIL_SHA512_H +#include #include #include "attributes.h" #include "version.h" /** - * @defgroup lavu_sha512 SHA512 - * @ingroup lavu_crypto + * @defgroup lavu_sha512 SHA-512 + * @ingroup lavu_hash + * SHA-512 (Secure Hash Algorithm) hash function implementations. 
+ * + * This module supports the following SHA-2 hash functions: + * + * - SHA-512/224: 224 bits + * - SHA-512/256: 256 bits + * - SHA-384: 384 bits + * - SHA-512: 512 bits + * + * @see For SHA-1, SHA-256, and variants thereof, see @ref lavu_sha. + * * @{ */ @@ -58,7 +76,11 @@ int av_sha512_init(struct AVSHA512* context, int bits); * @param data input data to update hash with * @param len input data length */ +#if FF_API_CRYPTO_SIZE_T void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); +#else +void av_sha512_update(struct AVSHA512* context, const uint8_t* data, size_t len); +#endif /** * Finish hashing and output digest value. diff --git a/third-party/FFmpeg-iOS/include/libavutil/spherical.h b/third-party/FFmpeg-iOS/include/libavutil/spherical.h new file mode 100644 index 0000000000..cef759cf27 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/spherical.h @@ -0,0 +1,232 @@ +/* + * Copyright (c) 2016 Vittorio Giovara + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Spherical video + */ + +#ifndef AVUTIL_SPHERICAL_H +#define AVUTIL_SPHERICAL_H + +#include +#include + +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_spherical Spherical video mapping + * @{ + */ + +/** + * @addtogroup lavu_video_spherical + * A spherical video file contains surfaces that need to be mapped onto a + * sphere. Depending on how the frame was converted, a different distortion + * transformation or surface recomposition function needs to be applied before + * the video should be mapped and displayed. + */ + +/** + * Projection of the video surface(s) on a sphere. + */ +enum AVSphericalProjection { + /** + * Video represents a sphere mapped on a flat surface using + * equirectangular projection. + */ + AV_SPHERICAL_EQUIRECTANGULAR, + + /** + * Video frame is split into 6 faces of a cube, and arranged on a + * 3x2 layout. Faces are oriented upwards for the front, left, right, + * and back faces. The up face is oriented so the top of the face is + * forwards and the down face is oriented so the top of the face is + * to the back. + */ + AV_SPHERICAL_CUBEMAP, + + /** + * Video represents a portion of a sphere mapped on a flat surface + * using equirectangular projection. The @ref bounding fields indicate + * the position of the current video in a larger surface. + */ + AV_SPHERICAL_EQUIRECTANGULAR_TILE, +}; + +/** + * This structure describes how to handle spherical videos, outlining + * information about projection, initial layout, and any other view modifier. + * + * @note The struct must be allocated with av_spherical_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVSphericalMapping { + /** + * Projection type. 
+ */ + enum AVSphericalProjection projection; + + /** + * @name Initial orientation + * @{ + * There fields describe additional rotations applied to the sphere after + * the video frame is mapped onto it. The sphere is rotated around the + * viewer, who remains stationary. The order of transformation is always + * yaw, followed by pitch, and finally by roll. + * + * The coordinate system matches the one defined in OpenGL, where the + * forward vector (z) is coming out of screen, and it is equivalent to + * a rotation matrix of R = r_y(yaw) * r_x(pitch) * r_z(roll). + * + * A positive yaw rotates the portion of the sphere in front of the viewer + * toward their right. A positive pitch rotates the portion of the sphere + * in front of the viewer upwards. A positive roll tilts the portion of + * the sphere in front of the viewer to the viewer's right. + * + * These values are exported as 16.16 fixed point. + * + * See this equirectangular projection as example: + * + * @code{.unparsed} + * Yaw + * -180 0 180 + * 90 +-------------+-------------+ 180 + * | | | up + * P | | | y| forward + * i | ^ | | /z + * t 0 +-------------X-------------+ 0 Roll | / + * c | | | | / + * h | | | 0|/_____right + * | | | x + * -90 +-------------+-------------+ -180 + * + * X - the default camera center + * ^ - the default up vector + * @endcode + */ + int32_t yaw; ///< Rotation around the up vector [-180, 180]. + int32_t pitch; ///< Rotation around the right vector [-90, 90]. + int32_t roll; ///< Rotation around the forward vector [-180, 180]. + /** + * @} + */ + + /** + * @name Bounding rectangle + * @anchor bounding + * @{ + * These fields indicate the location of the current tile, and where + * it should be mapped relative to the original surface. They are + * exported as 0.32 fixed point, and can be converted to classic + * pixel values with av_spherical_bounds(). 
+ * + * @code{.unparsed} + * +----------------+----------+ + * | |bound_top | + * | +--------+ | + * | bound_left |tile | | + * +<---------->| |<--->+bound_right + * | +--------+ | + * | | | + * | bound_bottom| | + * +----------------+----------+ + * @endcode + * + * If needed, the original video surface dimensions can be derived + * by adding the current stream or frame size to the related bounds, + * like in the following example: + * + * @code{c} + * original_width = tile->width + bound_left + bound_right; + * original_height = tile->height + bound_top + bound_bottom; + * @endcode + * + * @note These values are valid only for the tiled equirectangular + * projection type (@ref AV_SPHERICAL_EQUIRECTANGULAR_TILE), + * and should be ignored in all other cases. + */ + uint32_t bound_left; ///< Distance from the left edge + uint32_t bound_top; ///< Distance from the top edge + uint32_t bound_right; ///< Distance from the right edge + uint32_t bound_bottom; ///< Distance from the bottom edge + /** + * @} + */ + + /** + * Number of pixels to pad from the edge of each cube face. + * + * @note This value is valid for only for the cubemap projection type + * (@ref AV_SPHERICAL_CUBEMAP), and should be ignored in all other + * cases. + */ + uint32_t padding; +} AVSphericalMapping; + +/** + * Allocate a AVSphericalVideo structure and initialize its fields to default + * values. + * + * @return the newly allocated struct or NULL on failure + */ +AVSphericalMapping *av_spherical_alloc(size_t *size); + +/** + * Convert the @ref bounding fields from an AVSphericalVideo + * from 0.32 fixed point to pixels. + * + * @param map The AVSphericalVideo map to read bound values from. + * @param width Width of the current frame or stream. + * @param height Height of the current frame or stream. + * @param left Pixels from the left edge. + * @param top Pixels from the top edge. + * @param right Pixels from the right edge. + * @param bottom Pixels from the bottom edge. 
+ */ +void av_spherical_tile_bounds(const AVSphericalMapping *map, + size_t width, size_t height, + size_t *left, size_t *top, + size_t *right, size_t *bottom); + +/** + * Provide a human-readable name of a given AVSphericalProjection. + * + * @param projection The input AVSphericalProjection. + * + * @return The name of the AVSphericalProjection, or "unknown". + */ +const char *av_spherical_projection_name(enum AVSphericalProjection projection); + +/** + * Get the AVSphericalProjection form a human-readable name. + * + * @param name The input string. + * + * @return The AVSphericalProjection value, or -1 if not found. + */ +int av_spherical_from_name(const char *name); +/** + * @} + * @} + */ + +#endif /* AVUTIL_SPHERICAL_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h b/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h index 19c541643e..54f4c4c5c7 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h +++ b/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h @@ -18,6 +18,11 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * Stereoscopic video + */ + #ifndef AVUTIL_STEREO3D_H #define AVUTIL_STEREO3D_H @@ -25,6 +30,21 @@ #include "frame.h" +/** + * @addtogroup lavu_video + * @{ + * + * @defgroup lavu_video_stereo3d Stereo3D types and functions + * @{ + */ + +/** + * @addtogroup lavu_video_stereo3d + * A stereoscopic video file consists in multiple views embedded in a single + * frame, usually describing two views of a scene. This file describes all + * possible codec-independent view arrangements. + * */ + /** * List of possible 3D Types */ @@ -37,41 +57,49 @@ enum AVStereo3DType { /** * Views are next to each other. * + * @code{.unparsed} * LLLLRRRR * LLLLRRRR * LLLLRRRR * ... + * @endcode */ AV_STEREO3D_SIDEBYSIDE, /** * Views are on top of each other. 
* + * @code{.unparsed} * LLLLLLLL * LLLLLLLL * RRRRRRRR * RRRRRRRR + * @endcode */ AV_STEREO3D_TOPBOTTOM, /** * Views are alternated temporally. * + * @code{.unparsed} * frame0 frame1 frame2 ... * LLLLLLLL RRRRRRRR LLLLLLLL * LLLLLLLL RRRRRRRR LLLLLLLL * LLLLLLLL RRRRRRRR LLLLLLLL * ... ... ... + * @endcode */ AV_STEREO3D_FRAMESEQUENCE, /** * Views are packed in a checkerboard-like structure per pixel. * + * @code{.unparsed} * LRLRLRLR * RLRLRLRL * LRLRLRLR * ... + * @endcode */ AV_STEREO3D_CHECKERBOARD, @@ -79,30 +107,36 @@ enum AVStereo3DType { * Views are next to each other, but when upscaling * apply a checkerboard pattern. * + * @code{.unparsed} * LLLLRRRR L L L L R R R R * LLLLRRRR => L L L L R R R R * LLLLRRRR L L L L R R R R * LLLLRRRR L L L L R R R R + * @endcode */ AV_STEREO3D_SIDEBYSIDE_QUINCUNX, /** * Views are packed per line, as if interlaced. * + * @code{.unparsed} * LLLLLLLL * RRRRRRRR * LLLLLLLL * ... + * @endcode */ AV_STEREO3D_LINES, /** * Views are packed per column. * + * @code{.unparsed} * LRLRLRLR * LRLRLRLR * LRLRLRLR * ... + * @endcode */ AV_STEREO3D_COLUMNS, }; @@ -161,10 +195,15 @@ const char *av_stereo3d_type_name(unsigned int type); /** * Get the AVStereo3DType form a human-readable name. * - * @param type The input string. + * @param name The input string. * * @return The AVStereo3DType value, or -1 if not found. 
*/ int av_stereo3d_from_name(const char *name); +/** + * @} + * @} + */ + #endif /* AVUTIL_STEREO3D_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/timecode.h b/third-party/FFmpeg-iOS/include/libavutil/timecode.h index 56e3975fd8..37c1361bc2 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/timecode.h +++ b/third-party/FFmpeg-iOS/include/libavutil/timecode.h @@ -30,7 +30,7 @@ #include #include "rational.h" -#define AV_TIMECODE_STR_SIZE 16 +#define AV_TIMECODE_STR_SIZE 23 enum AVTimecodeFlag { AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame diff --git a/third-party/FFmpeg-iOS/include/libavutil/timestamp.h b/third-party/FFmpeg-iOS/include/libavutil/timestamp.h index f010a7ee38..e082f01b40 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/timestamp.h +++ b/third-party/FFmpeg-iOS/include/libavutil/timestamp.h @@ -43,7 +43,7 @@ static inline char *av_ts_make_string(char *buf, int64_t ts) { if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); - else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%" PRId64, ts); return buf; } diff --git a/third-party/FFmpeg-iOS/include/libavutil/tree.h b/third-party/FFmpeg-iOS/include/libavutil/tree.h index 9a9e11b92c..d5e0aebfbd 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/tree.h +++ b/third-party/FFmpeg-iOS/include/libavutil/tree.h @@ -58,7 +58,7 @@ struct AVTreeNode *av_tree_node_alloc(void); * then the corresponding entry in next is unchanged. * @param cmp compare function used to compare elements in the tree, * API identical to that of Standard C's qsort - * It is guranteed that the first and only the first argument to cmp() + * It is guaranteed that the first and only the first argument to cmp() * will be the key parameter to av_tree_find(), thus it could if the * user wants, be a different type (like an opaque context). 
* @return An element with cmp(key, elem) == 0 or NULL if no such element diff --git a/third-party/FFmpeg-iOS/include/libavutil/version.h b/third-party/FFmpeg-iOS/include/libavutil/version.h index 07618fc0bc..f594dc0691 100644 --- a/third-party/FFmpeg-iOS/include/libavutil/version.h +++ b/third-party/FFmpeg-iOS/include/libavutil/version.h @@ -18,6 +18,12 @@ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA */ +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + #ifndef AVUTIL_VERSION_H #define AVUTIL_VERSION_H @@ -29,6 +35,21 @@ * Useful to check and match library version in order to maintain * backward compatibility. * + * The FFmpeg libraries follow a versioning sheme very similar to + * Semantic Versioning (http://semver.org/) + * The difference is that the component called PATCH is called MICRO in FFmpeg + * and its value is reset to 100 instead of 0 to keep it above or equal to 100. + * Also we do not increase MICRO for every bugfix or change in git master. + * + * Prior to FFmpeg 3.2 point releases did not change any lib version number to + * avoid aliassing different git master checkouts. + * Starting with FFmpeg 3.2, the released library versions will occupy + * a separate MAJOR.MINOR that is not used on the master development branch. + * That is if we branch a release of master 55.10.123 we will bump to 55.11.100 + * for the release and master will continue at 55.12.100 after it. Each new + * point release will then bump the MICRO improving the usefulness of the lib + * versions. 
+ * * @{ */ @@ -48,12 +69,6 @@ * @} */ -/** - * @file - * @ingroup lavu - * Libavutil version macros - */ - /** * @defgroup lavu_ver Version and Build diagnostics * @@ -63,8 +78,9 @@ * @{ */ + #define LIBAVUTIL_VERSION_MAJOR 55 -#define LIBAVUTIL_VERSION_MINOR 28 +#define LIBAVUTIL_VERSION_MINOR 78 #define LIBAVUTIL_VERSION_MICRO 100 #define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ @@ -78,9 +94,7 @@ #define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) /** - * @} - * - * @defgroup depr_guards Deprecation guards + * @defgroup lavu_depr_guards Deprecation Guards * FF_API_* defines may be placed below to indicate public API that will be * dropped at a future version bump. The defines themselves are not part of * the public API and may change, break or disappear at any time. @@ -119,9 +133,16 @@ #ifndef FF_API_CRC_BIG_TABLE #define FF_API_CRC_BIG_TABLE (LIBAVUTIL_VERSION_MAJOR < 56) #endif +#ifndef FF_API_PKT_PTS +#define FF_API_PKT_PTS (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CRYPTO_SIZE_T +#define FF_API_CRYPTO_SIZE_T (LIBAVUTIL_VERSION_MAJOR < 56) +#endif /** + * @} * @} */ diff --git a/third-party/FFmpeg-iOS/include/libswresample/swresample.h b/third-party/FFmpeg-iOS/include/libswresample/swresample.h index f461edc544..405bd7b7fc 100644 --- a/third-party/FFmpeg-iOS/include/libswresample/swresample.h +++ b/third-party/FFmpeg-iOS/include/libswresample/swresample.h @@ -28,11 +28,10 @@ */ /** - * @defgroup lswr Libswresample + * @defgroup lswr libswresample * @{ * - * Libswresample (lswr) is a library that handles audio resampling, sample - * format conversion and mixing. + * Audio resampling, sample format conversion and mixing library. * * Interaction with lswr is done through SwrContext, which is * allocated with swr_alloc() or swr_alloc_set_opts(). 
It is opaque, so all parameters @@ -121,15 +120,12 @@ */ #include +#include "../libavutil/channel_layout.h" #include "../libavutil/frame.h" #include "../libavutil/samplefmt.h" #include "../libswresample/version.h" -#if LIBSWRESAMPLE_VERSION_MAJOR < 1 -#define SWR_CH_MAX 32 ///< Maximum number of channels -#endif - /** * @name Option constants * These constants are used for the @ref avoptions interface for lswr. @@ -366,6 +362,36 @@ int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensatio */ int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); +/** + * Generate a channel mixing matrix. + * + * This function is the one used internally by libswresample for building the + * default mixing matrix. It is made public just as a utility function for + * building custom matrices. + * + * @param in_layout input channel layout + * @param out_layout output channel layout + * @param center_mix_level mix level for the center channel + * @param surround_mix_level mix level for the surround channel(s) + * @param lfe_mix_level mix level for the low-frequency effects channel + * @param rematrix_maxval if 1.0, coefficients will be normalized to prevent + * overflow. if INT_MAX, coefficients will not be + * normalized. + * @param[out] matrix mixing coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o. + * @param stride distance between adjacent input channels in the + * matrix array + * @param matrix_encoding matrixed stereo downmix mode (e.g. dplii) + * @param log_ctx parent logging context, can be NULL + * @return 0 on success, negative AVERROR code on failure + */ +int swr_build_matrix(uint64_t in_layout, uint64_t out_layout, + double center_mix_level, double surround_mix_level, + double lfe_mix_level, double rematrix_maxval, + double rematrix_volume, double *matrix, + int stride, enum AVMatrixEncoding matrix_encoding, + void *log_ctx); + /** * Set a customized remix matrix. 
* diff --git a/third-party/FFmpeg-iOS/include/libswresample/version.h b/third-party/FFmpeg-iOS/include/libswresample/version.h index 703d76f19d..7e7fb6abd5 100644 --- a/third-party/FFmpeg-iOS/include/libswresample/version.h +++ b/third-party/FFmpeg-iOS/include/libswresample/version.h @@ -29,7 +29,7 @@ #include "../libavutil/avutil.h" #define LIBSWRESAMPLE_VERSION_MAJOR 2 -#define LIBSWRESAMPLE_VERSION_MINOR 1 +#define LIBSWRESAMPLE_VERSION_MINOR 9 #define LIBSWRESAMPLE_VERSION_MICRO 100 #define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ diff --git a/third-party/FFmpeg-iOS/lib/libavcodec.a b/third-party/FFmpeg-iOS/lib/libavcodec.a index 39507dfbba..f316b10040 100644 Binary files a/third-party/FFmpeg-iOS/lib/libavcodec.a and b/third-party/FFmpeg-iOS/lib/libavcodec.a differ diff --git a/third-party/FFmpeg-iOS/lib/libavdevice.a b/third-party/FFmpeg-iOS/lib/libavdevice.a new file mode 100644 index 0000000000..c6b0364df8 Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libavdevice.a differ diff --git a/third-party/FFmpeg-iOS/lib/libavfilter.a b/third-party/FFmpeg-iOS/lib/libavfilter.a new file mode 100644 index 0000000000..2b4a0459fe Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libavfilter.a differ diff --git a/third-party/FFmpeg-iOS/lib/libavformat.a b/third-party/FFmpeg-iOS/lib/libavformat.a index 93bc45eaf0..f5f72dae63 100644 Binary files a/third-party/FFmpeg-iOS/lib/libavformat.a and b/third-party/FFmpeg-iOS/lib/libavformat.a differ diff --git a/third-party/FFmpeg-iOS/lib/libavutil.a b/third-party/FFmpeg-iOS/lib/libavutil.a index ddbc4186c7..a491219fc3 100644 Binary files a/third-party/FFmpeg-iOS/lib/libavutil.a and b/third-party/FFmpeg-iOS/lib/libavutil.a differ diff --git a/third-party/FFmpeg-iOS/lib/libswresample.a b/third-party/FFmpeg-iOS/lib/libswresample.a index c022d0be8f..d7587b0a8d 100644 Binary files a/third-party/FFmpeg-iOS/lib/libswresample.a and b/third-party/FFmpeg-iOS/lib/libswresample.a differ diff --git 
a/third-party/FFmpeg-iOS/lib/libswscale.a b/third-party/FFmpeg-iOS/lib/libswscale.a new file mode 100644 index 0000000000..e0538fa521 Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libswscale.a differ