diff --git a/Images.xcassets/Chat List/Contents.json b/Images.xcassets/Chat List/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat List/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/Contents.json b/Images.xcassets/Chat List/Tabs/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconChats.imageset/Contents.json b/Images.xcassets/Chat List/Tabs/IconChats.imageset/Contents.json new file mode 100644 index 0000000000..9d36fc4473 --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/IconChats.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "TabIconMessages@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconChats.imageset/TabIconMessages@2x.png b/Images.xcassets/Chat List/Tabs/IconChats.imageset/TabIconMessages@2x.png new file mode 100644 index 0000000000..30b8a0097e Binary files /dev/null and b/Images.xcassets/Chat List/Tabs/IconChats.imageset/TabIconMessages@2x.png differ diff --git a/Images.xcassets/Chat List/Tabs/IconChatsSelected.imageset/Contents.json b/Images.xcassets/Chat List/Tabs/IconChatsSelected.imageset/Contents.json new file mode 100644 index 0000000000..0cb58499a6 --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/IconChatsSelected.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { 
+ "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "TabIconMessages_Highlighted@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconChatsSelected.imageset/TabIconMessages_Highlighted@2x.png b/Images.xcassets/Chat List/Tabs/IconChatsSelected.imageset/TabIconMessages_Highlighted@2x.png new file mode 100644 index 0000000000..23612a4c9c Binary files /dev/null and b/Images.xcassets/Chat List/Tabs/IconChatsSelected.imageset/TabIconMessages_Highlighted@2x.png differ diff --git a/Images.xcassets/Chat List/Tabs/IconContacts.imageset/Contents.json b/Images.xcassets/Chat List/Tabs/IconContacts.imageset/Contents.json new file mode 100644 index 0000000000..2a12bb8512 --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/IconContacts.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "TabIconContacts@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconContacts.imageset/TabIconContacts@2x.png b/Images.xcassets/Chat List/Tabs/IconContacts.imageset/TabIconContacts@2x.png new file mode 100644 index 0000000000..4a523ac902 Binary files /dev/null and b/Images.xcassets/Chat List/Tabs/IconContacts.imageset/TabIconContacts@2x.png differ diff --git a/Images.xcassets/Chat List/Tabs/IconContactsSelected.imageset/Contents.json b/Images.xcassets/Chat List/Tabs/IconContactsSelected.imageset/Contents.json new file mode 100644 index 0000000000..e9b76ca372 --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/IconContactsSelected.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + 
}, + { + "idiom" : "universal", + "filename" : "TabIconContacts_Highlighted@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconContactsSelected.imageset/TabIconContacts_Highlighted@2x.png b/Images.xcassets/Chat List/Tabs/IconContactsSelected.imageset/TabIconContacts_Highlighted@2x.png new file mode 100644 index 0000000000..20d555e6d3 Binary files /dev/null and b/Images.xcassets/Chat List/Tabs/IconContactsSelected.imageset/TabIconContacts_Highlighted@2x.png differ diff --git a/Images.xcassets/Chat List/Tabs/IconSettings.imageset/Contents.json b/Images.xcassets/Chat List/Tabs/IconSettings.imageset/Contents.json new file mode 100644 index 0000000000..d12dc44869 --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/IconSettings.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "TabIconSettings@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconSettings.imageset/TabIconSettings@2x.png b/Images.xcassets/Chat List/Tabs/IconSettings.imageset/TabIconSettings@2x.png new file mode 100644 index 0000000000..c1a9926fc4 Binary files /dev/null and b/Images.xcassets/Chat List/Tabs/IconSettings.imageset/TabIconSettings@2x.png differ diff --git a/Images.xcassets/Chat List/Tabs/IconSettingsSelected.imageset/Contents.json b/Images.xcassets/Chat List/Tabs/IconSettingsSelected.imageset/Contents.json new file mode 100644 index 0000000000..51e3998e8b --- /dev/null +++ b/Images.xcassets/Chat List/Tabs/IconSettingsSelected.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + 
"filename" : "TabIconSettings_Highlighted@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat List/Tabs/IconSettingsSelected.imageset/TabIconSettings_Highlighted@2x.png b/Images.xcassets/Chat List/Tabs/IconSettingsSelected.imageset/TabIconSettings_Highlighted@2x.png new file mode 100644 index 0000000000..33dd36bb4b Binary files /dev/null and b/Images.xcassets/Chat List/Tabs/IconSettingsSelected.imageset/TabIconSettings_Highlighted@2x.png differ diff --git a/Images.xcassets/Chat/Contents.json b/Images.xcassets/Chat/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Input/Contents.json b/Images.xcassets/Chat/Input/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat/Input/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Input/Text/Contents.json b/Images.xcassets/Chat/Input/Text/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat/Input/Text/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Input/Text/IconAttachment.imageset/Contents.json b/Images.xcassets/Chat/Input/Text/IconAttachment.imageset/Contents.json new file mode 100644 index 0000000000..ae4dac8782 --- /dev/null +++ b/Images.xcassets/Chat/Input/Text/IconAttachment.imageset/Contents.json @@ 
-0,0 +1,12 @@ +{ + "images" : [ + { + "idiom" : "universal", + "filename" : "IconAttachment.pdf" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Input/Text/IconAttachment.imageset/IconAttachment.pdf b/Images.xcassets/Chat/Input/Text/IconAttachment.imageset/IconAttachment.pdf new file mode 100644 index 0000000000..0a160807a6 Binary files /dev/null and b/Images.xcassets/Chat/Input/Text/IconAttachment.imageset/IconAttachment.pdf differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncoming.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleIncoming.imageset/Contents.json new file mode 100644 index 0000000000..80cfcaecaa --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleIncoming.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 3, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 32, + "top" : 32, + "right" : 33, + "left" : 41 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleIncomingFullPad@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncoming.imageset/ModernBubbleIncomingFullPad@2x.png b/Images.xcassets/Chat/Message/Background/BubbleIncoming.imageset/ModernBubbleIncomingFullPad@2x.png new file mode 100644 index 0000000000..d9f6fd76ee Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleIncoming.imageset/ModernBubbleIncomingFullPad@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMerged.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleIncomingMerged.imageset/Contents.json new file mode 100644 index 0000000000..08c8c83b11 
--- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleIncomingMerged.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 1, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 33, + "top" : 32, + "right" : 34, + "left" : 44 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleIncomingPartialPad@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMerged.imageset/ModernBubbleIncomingPartialPad@2x.png b/Images.xcassets/Chat/Message/Background/BubbleIncomingMerged.imageset/ModernBubbleIncomingPartialPad@2x.png new file mode 100644 index 0000000000..11a5c80fa4 Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleIncomingMerged.imageset/ModernBubbleIncomingPartialPad@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBoth.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBoth.imageset/Contents.json new file mode 100644 index 0000000000..0e38a88e8b --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBoth.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 1, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 32, + "top" : 33, + "right" : 49, + "left" : 30 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleIncomingMergedBoth@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git 
a/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBoth.imageset/ModernBubbleIncomingMergedBoth@2x.png b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBoth.imageset/ModernBubbleIncomingMergedBoth@2x.png new file mode 100644 index 0000000000..2bca41e962 Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBoth.imageset/ModernBubbleIncomingMergedBoth@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBottom.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBottom.imageset/Contents.json new file mode 100644 index 0000000000..a79bb7b0f8 --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBottom.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 1, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 32, + "top" : 33, + "right" : 35, + "left" : 44 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleIncomingMergedBottom@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBottom.imageset/ModernBubbleIncomingMergedBottom@2x.png b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBottom.imageset/ModernBubbleIncomingMergedBottom@2x.png new file mode 100644 index 0000000000..1d511b792b Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedBottom.imageset/ModernBubbleIncomingMergedBottom@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedTop.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedTop.imageset/Contents.json new file mode 100644 index 
0000000000..00b5220dbf --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedTop.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 1, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 33, + "top" : 32, + "right" : 36, + "left" : 43 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleIncomingMergedTop@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedTop.imageset/ModernBubbleIncomingMergedTop@2x.png b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedTop.imageset/ModernBubbleIncomingMergedTop@2x.png new file mode 100644 index 0000000000..69cfa208f6 Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleIncomingMergedTop.imageset/ModernBubbleIncomingMergedTop@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleOutgoing.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleOutgoing.imageset/Contents.json new file mode 100644 index 0000000000..843f329871 --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleOutgoing.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 1, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 33, + "top" : 31, + "right" : 43, + "left" : 34 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleOutgoingFullPad@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git 
a/Images.xcassets/Chat/Message/Background/BubbleOutgoing.imageset/ModernBubbleOutgoingFullPad@2x.png b/Images.xcassets/Chat/Message/Background/BubbleOutgoing.imageset/ModernBubbleOutgoingFullPad@2x.png new file mode 100644 index 0000000000..51946b74b0 Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleOutgoing.imageset/ModernBubbleOutgoingFullPad@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/BubbleOutgoingMerged.imageset/Contents.json b/Images.xcassets/Chat/Message/Background/BubbleOutgoingMerged.imageset/Contents.json new file mode 100644 index 0000000000..227b099c2d --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/BubbleOutgoingMerged.imageset/Contents.json @@ -0,0 +1,35 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "resizing" : { + "mode" : "9-part", + "center" : { + "mode" : "stretch", + "width" : 1, + "height" : 1 + }, + "cap-insets" : { + "bottom" : 33, + "top" : 32, + "right" : 44, + "left" : 34 + } + }, + "idiom" : "universal", + "filename" : "ModernBubbleOutgoingPartialPad@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/Background/BubbleOutgoingMerged.imageset/ModernBubbleOutgoingPartialPad@2x.png b/Images.xcassets/Chat/Message/Background/BubbleOutgoingMerged.imageset/ModernBubbleOutgoingPartialPad@2x.png new file mode 100644 index 0000000000..982d3b1a4e Binary files /dev/null and b/Images.xcassets/Chat/Message/Background/BubbleOutgoingMerged.imageset/ModernBubbleOutgoingPartialPad@2x.png differ diff --git a/Images.xcassets/Chat/Message/Background/Contents.json b/Images.xcassets/Chat/Message/Background/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat/Message/Background/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" 
+ }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/Contents.json b/Images.xcassets/Chat/Message/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat/Message/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/Contents.json b/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/Contents.json new file mode 100644 index 0000000000..9f3aeed1f7 --- /dev/null +++ b/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/Contents.json @@ -0,0 +1,22 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "ModernMessageDocumentIconIncoming@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "filename" : "ModernMessageDocumentIconIncoming@3x.png", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/ModernMessageDocumentIconIncoming@2x.png b/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/ModernMessageDocumentIconIncoming@2x.png new file mode 100644 index 0000000000..1da98c4002 Binary files /dev/null and b/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/ModernMessageDocumentIconIncoming@2x.png differ diff --git a/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/ModernMessageDocumentIconIncoming@3x.png b/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/ModernMessageDocumentIconIncoming@3x.png new file mode 100644 index 0000000000..b92416cd4c Binary files /dev/null and 
b/Images.xcassets/Chat/Message/RadialProgressIconDocumentIncoming.imageset/ModernMessageDocumentIconIncoming@3x.png differ diff --git a/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/Contents.json b/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/Contents.json new file mode 100644 index 0000000000..fa47c058e0 --- /dev/null +++ b/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/Contents.json @@ -0,0 +1,22 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "ModernMessageDocumentIconOutgoing@2x.png", + "scale" : "2x" + }, + { + "idiom" : "universal", + "filename" : "ModernMessageDocumentIconOutgoing@3x.png", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/ModernMessageDocumentIconOutgoing@2x.png b/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/ModernMessageDocumentIconOutgoing@2x.png new file mode 100644 index 0000000000..d3d84a00ef Binary files /dev/null and b/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/ModernMessageDocumentIconOutgoing@2x.png differ diff --git a/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/ModernMessageDocumentIconOutgoing@3x.png b/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/ModernMessageDocumentIconOutgoing@3x.png new file mode 100644 index 0000000000..cc773196b2 Binary files /dev/null and b/Images.xcassets/Chat/Message/RadialProgressIconDocumentOutgoing.imageset/ModernMessageDocumentIconOutgoing@3x.png differ diff --git a/Images.xcassets/Chat/Wallpapers/Builtin0.imageset/Contents.json b/Images.xcassets/Chat/Wallpapers/Builtin0.imageset/Contents.json new file mode 100644 index 0000000000..6d499c7d57 --- /dev/null +++ 
b/Images.xcassets/Chat/Wallpapers/Builtin0.imageset/Contents.json @@ -0,0 +1,21 @@ +{ + "images" : [ + { + "idiom" : "universal", + "scale" : "1x" + }, + { + "idiom" : "universal", + "filename" : "builtin-wallpaper-0.jpg", + "scale" : "2x" + }, + { + "idiom" : "universal", + "scale" : "3x" + } + ], + "info" : { + "version" : 1, + "author" : "xcode" + } +} \ No newline at end of file diff --git a/Images.xcassets/Chat/Wallpapers/Builtin0.imageset/builtin-wallpaper-0.jpg b/Images.xcassets/Chat/Wallpapers/Builtin0.imageset/builtin-wallpaper-0.jpg new file mode 100644 index 0000000000..ea45b36ee0 Binary files /dev/null and b/Images.xcassets/Chat/Wallpapers/Builtin0.imageset/builtin-wallpaper-0.jpg differ diff --git a/Images.xcassets/Chat/Wallpapers/Contents.json b/Images.xcassets/Chat/Wallpapers/Contents.json new file mode 100644 index 0000000000..38f0c81fc2 --- /dev/null +++ b/Images.xcassets/Chat/Wallpapers/Contents.json @@ -0,0 +1,9 @@ +{ + "info" : { + "version" : 1, + "author" : "xcode" + }, + "properties" : { + "provides-namespace" : true + } +} \ No newline at end of file diff --git a/TelegramUI.xcodeproj/project.pbxproj b/TelegramUI.xcodeproj/project.pbxproj index 92655f00f7..9c0cfc695f 100644 --- a/TelegramUI.xcodeproj/project.pbxproj +++ b/TelegramUI.xcodeproj/project.pbxproj @@ -14,9 +14,136 @@ D0AB0BB11D6718DA002C78E7 /* libiconv.tbd in Frameworks */ = {isa = PBXBuildFile; fileRef = D0AB0BB01D6718DA002C78E7 /* libiconv.tbd */; }; D0AB0BB31D6718EB002C78E7 /* libz.tbd in Frameworks */ = {isa = PBXBuildFile; fileRef = D0AB0BB21D6718EB002C78E7 /* libz.tbd */; }; D0AB0BB51D6718F1002C78E7 /* CoreMedia.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0AB0BB41D6718F1002C78E7 /* CoreMedia.framework */; }; - D0AB0BB81D67191C002C78E7 /* MtProtoKit.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0AB0BB61D67191C002C78E7 /* MtProtoKit.framework */; }; - D0AB0BB91D67191C002C78E7 /* SSignalKit.framework in Frameworks */ = {isa = PBXBuildFile; 
fileRef = D0AB0BB71D67191C002C78E7 /* SSignalKit.framework */; }; D0AB0BBB1D6719B5002C78E7 /* Images.xcassets in Resources */ = {isa = PBXBuildFile; fileRef = D0AB0BBA1D6719B5002C78E7 /* Images.xcassets */; }; + D0F69D231D6B87D30046BCD6 /* FFMpegMediaFrameSourceContext.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CD31D6B87D30046BCD6 /* FFMpegMediaFrameSourceContext.swift */; }; + D0F69D241D6B87D30046BCD6 /* MediaPlayerAudioRenderer.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CD41D6B87D30046BCD6 /* MediaPlayerAudioRenderer.swift */; }; + D0F69D261D6B87D30046BCD6 /* MediaManager.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CD61D6B87D30046BCD6 /* MediaManager.swift */; }; + D0F69D271D6B87D30046BCD6 /* FFMpegAudioFrameDecoder.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CD71D6B87D30046BCD6 /* FFMpegAudioFrameDecoder.swift */; }; + D0F69D2C1D6B87D30046BCD6 /* MediaPlayerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CDC1D6B87D30046BCD6 /* MediaPlayerNode.swift */; }; + D0F69D2E1D6B87D30046BCD6 /* PeerAvatar.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CDE1D6B87D30046BCD6 /* PeerAvatar.swift */; }; + D0F69D311D6B87D30046BCD6 /* FFMpegMediaFrameSource.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CE11D6B87D30046BCD6 /* FFMpegMediaFrameSource.swift */; }; + D0F69D351D6B87D30046BCD6 /* MediaFrameSource.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CE51D6B87D30046BCD6 /* MediaFrameSource.swift */; }; + D0F69D4B1D6B87D30046BCD6 /* TouchDownGestureRecognizer.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69CFB1D6B87D30046BCD6 /* TouchDownGestureRecognizer.swift */; }; + D0F69D521D6B87D30046BCD6 /* MediaPlayer.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D021D6B87D30046BCD6 /* MediaPlayer.swift */; }; + D0F69D661D6B87D30046BCD6 /* FFMpegMediaFrameSourceContextHelpers.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D161D6B87D30046BCD6 /* 
FFMpegMediaFrameSourceContextHelpers.swift */; }; + D0F69D671D6B87D30046BCD6 /* FFMpegPacket.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D171D6B87D30046BCD6 /* FFMpegPacket.swift */; }; + D0F69D6D1D6B87D30046BCD6 /* MediaTrackDecodableFrame.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D1D1D6B87D30046BCD6 /* MediaTrackDecodableFrame.swift */; }; + D0F69D771D6B87DF0046BCD6 /* FFMpegMediaPassthroughVideoFrameDecoder.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D6F1D6B87DE0046BCD6 /* FFMpegMediaPassthroughVideoFrameDecoder.swift */; }; + D0F69D781D6B87DF0046BCD6 /* MediaTrackFrameBuffer.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D701D6B87DE0046BCD6 /* MediaTrackFrameBuffer.swift */; }; + D0F69D791D6B87DF0046BCD6 /* MediaTrackFrame.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D711D6B87DE0046BCD6 /* MediaTrackFrame.swift */; }; + D0F69D9C1D6B87EC0046BCD6 /* MediaPlaybackData.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D7F1D6B87EC0046BCD6 /* MediaPlaybackData.swift */; }; + D0F69DA41D6B87EC0046BCD6 /* FFMpegMediaVideoFrameDecoder.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D871D6B87EC0046BCD6 /* FFMpegMediaVideoFrameDecoder.swift */; }; + D0F69DA51D6B87EC0046BCD6 /* MediaTrackFrameDecoder.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D881D6B87EC0046BCD6 /* MediaTrackFrameDecoder.swift */; }; + D0F69DAD1D6B87EC0046BCD6 /* Cache.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69D901D6B87EC0046BCD6 /* Cache.swift */; }; + D0F69DBA1D6B88190046BCD6 /* TelegramUI.xcconfig in Resources */ = {isa = PBXBuildFile; fileRef = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; }; + D0F69DC11D6B89D30046BCD6 /* ListSectionHeaderNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DC01D6B89D30046BCD6 /* ListSectionHeaderNode.swift */; }; + D0F69DC31D6B89DA0046BCD6 /* TextNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DC21D6B89DA0046BCD6 /* 
TextNode.swift */; }; + D0F69DC51D6B89E10046BCD6 /* RadialProgressNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DC41D6B89E10046BCD6 /* RadialProgressNode.swift */; }; + D0F69DC71D6B89E70046BCD6 /* TransformImageNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DC61D6B89E70046BCD6 /* TransformImageNode.swift */; }; + D0F69DC91D6B89EB0046BCD6 /* ImageNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DC81D6B89EB0046BCD6 /* ImageNode.swift */; }; + D0F69DCF1D6B8A0D0046BCD6 /* SearchBarNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DCB1D6B8A0D0046BCD6 /* SearchBarNode.swift */; }; + D0F69DD01D6B8A0D0046BCD6 /* SearchBarPlaceholderNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DCC1D6B8A0D0046BCD6 /* SearchBarPlaceholderNode.swift */; }; + D0F69DD11D6B8A0D0046BCD6 /* SearchDisplayController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DCD1D6B8A0D0046BCD6 /* SearchDisplayController.swift */; }; + D0F69DD21D6B8A0D0046BCD6 /* SearchDisplayControllerContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DCE1D6B8A0D0046BCD6 /* SearchDisplayControllerContentNode.swift */; }; + D0F69DD61D6B8A2D0046BCD6 /* AlertController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DD51D6B8A2D0046BCD6 /* AlertController.swift */; }; + D0F69DDF1D6B8A420046BCD6 /* ListController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DD81D6B8A420046BCD6 /* ListController.swift */; }; + D0F69DE01D6B8A420046BCD6 /* ListControllerButtonItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DD91D6B8A420046BCD6 /* ListControllerButtonItem.swift */; }; + D0F69DE11D6B8A420046BCD6 /* ListControllerDisclosureActionItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DDA1D6B8A420046BCD6 /* ListControllerDisclosureActionItem.swift */; }; + D0F69DE21D6B8A420046BCD6 /* ListControllerGroupableItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DDB1D6B8A420046BCD6 /* 
ListControllerGroupableItem.swift */; }; + D0F69DE31D6B8A420046BCD6 /* ListControllerItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DDC1D6B8A420046BCD6 /* ListControllerItem.swift */; }; + D0F69DE41D6B8A420046BCD6 /* ListControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DDD1D6B8A420046BCD6 /* ListControllerNode.swift */; }; + D0F69DE51D6B8A420046BCD6 /* ListControllerSpacerItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DDE1D6B8A420046BCD6 /* ListControllerSpacerItem.swift */; }; + D0F69DEF1D6B8A6C0046BCD6 /* AuthorizationCodeController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DE81D6B8A6C0046BCD6 /* AuthorizationCodeController.swift */; }; + D0F69DF01D6B8A6C0046BCD6 /* AuthorizationCodeControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DE91D6B8A6C0046BCD6 /* AuthorizationCodeControllerNode.swift */; }; + D0F69DF11D6B8A6C0046BCD6 /* AuthorizationController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DEA1D6B8A6C0046BCD6 /* AuthorizationController.swift */; }; + D0F69DF21D6B8A6C0046BCD6 /* AuthorizationPasswordController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DEB1D6B8A6C0046BCD6 /* AuthorizationPasswordController.swift */; }; + D0F69DF31D6B8A6C0046BCD6 /* AuthorizationPasswordControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DEC1D6B8A6C0046BCD6 /* AuthorizationPasswordControllerNode.swift */; }; + D0F69DF41D6B8A6C0046BCD6 /* AuthorizationPhoneController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DED1D6B8A6C0046BCD6 /* AuthorizationPhoneController.swift */; }; + D0F69DF51D6B8A6C0046BCD6 /* AuthorizationPhoneControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DEE1D6B8A6C0046BCD6 /* AuthorizationPhoneControllerNode.swift */; }; + D0F69DFE1D6B8A880046BCD6 /* ChatListAvatarNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DF71D6B8A880046BCD6 /* ChatListAvatarNode.swift */; }; + 
D0F69DFF1D6B8A880046BCD6 /* ChatListController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DF81D6B8A880046BCD6 /* ChatListController.swift */; }; + D0F69E001D6B8A880046BCD6 /* ChatListControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DF91D6B8A880046BCD6 /* ChatListControllerNode.swift */; }; + D0F69E011D6B8A880046BCD6 /* ChatListEmptyItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DFA1D6B8A880046BCD6 /* ChatListEmptyItem.swift */; }; + D0F69E021D6B8A880046BCD6 /* ChatListHoleItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DFB1D6B8A880046BCD6 /* ChatListHoleItem.swift */; }; + D0F69E031D6B8A880046BCD6 /* ChatListItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DFC1D6B8A880046BCD6 /* ChatListItem.swift */; }; + D0F69E041D6B8A880046BCD6 /* ChatListSearchItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69DFD1D6B8A880046BCD6 /* ChatListSearchItem.swift */; }; + D0F69E081D6B8A9C0046BCD6 /* ChatListSearchContainerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E071D6B8A9C0046BCD6 /* ChatListSearchContainerNode.swift */; }; + D0F69E0A1D6B8AA60046BCD6 /* ChatListSearchRecentPeersNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E091D6B8AA60046BCD6 /* ChatListSearchRecentPeersNode.swift */; }; + D0F69E0C1D6B8AB10046BCD6 /* HorizontalPeerItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E0B1D6B8AB10046BCD6 /* HorizontalPeerItem.swift */; }; + D0F69E131D6B8ACF0046BCD6 /* ChatController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E0E1D6B8ACF0046BCD6 /* ChatController.swift */; }; + D0F69E141D6B8ACF0046BCD6 /* ChatControllerInteraction.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E0F1D6B8ACF0046BCD6 /* ChatControllerInteraction.swift */; }; + D0F69E151D6B8ACF0046BCD6 /* ChatControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E101D6B8ACF0046BCD6 /* ChatControllerNode.swift */; }; + 
D0F69E161D6B8ACF0046BCD6 /* ChatHistoryEntry.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E111D6B8ACF0046BCD6 /* ChatHistoryEntry.swift */; }; + D0F69E171D6B8ACF0046BCD6 /* ChatHistoryLocation.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E121D6B8ACF0046BCD6 /* ChatHistoryLocation.swift */; }; + D0F69E1A1D6B8AE60046BCD6 /* ChatHoleItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E191D6B8AE60046BCD6 /* ChatHoleItem.swift */; }; + D0F69E2D1D6B8B030046BCD6 /* ChatMessageActionItemNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E1B1D6B8B030046BCD6 /* ChatMessageActionItemNode.swift */; }; + D0F69E2E1D6B8B030046BCD6 /* ChatMessageAvatarAccessoryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E1C1D6B8B030046BCD6 /* ChatMessageAvatarAccessoryItem.swift */; }; + D0F69E2F1D6B8B030046BCD6 /* ChatMessageBubbleContentCalclulateImageCorners.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E1D1D6B8B030046BCD6 /* ChatMessageBubbleContentCalclulateImageCorners.swift */; }; + D0F69E301D6B8B030046BCD6 /* ChatMessageBubbleContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E1E1D6B8B030046BCD6 /* ChatMessageBubbleContentNode.swift */; }; + D0F69E311D6B8B030046BCD6 /* ChatMessageBubbleItemNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E1F1D6B8B030046BCD6 /* ChatMessageBubbleItemNode.swift */; }; + D0F69E321D6B8B030046BCD6 /* ChatMessageDateAndStatusNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E201D6B8B030046BCD6 /* ChatMessageDateAndStatusNode.swift */; }; + D0F69E331D6B8B030046BCD6 /* ChatMessageFileBubbleContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E211D6B8B030046BCD6 /* ChatMessageFileBubbleContentNode.swift */; }; + D0F69E341D6B8B030046BCD6 /* ChatMessageForwardInfoNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E221D6B8B030046BCD6 /* ChatMessageForwardInfoNode.swift */; }; + D0F69E351D6B8B030046BCD6 /* 
ChatMessageInteractiveFileNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E231D6B8B030046BCD6 /* ChatMessageInteractiveFileNode.swift */; }; + D0F69E361D6B8B030046BCD6 /* ChatMessageInteractiveMediaNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E241D6B8B030046BCD6 /* ChatMessageInteractiveMediaNode.swift */; }; + D0F69E371D6B8B030046BCD6 /* ChatMessageItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E251D6B8B030046BCD6 /* ChatMessageItem.swift */; }; + D0F69E381D6B8B030046BCD6 /* ChatMessageItemView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E261D6B8B030046BCD6 /* ChatMessageItemView.swift */; }; + D0F69E391D6B8B030046BCD6 /* ChatMessageMediaBubbleContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E271D6B8B030046BCD6 /* ChatMessageMediaBubbleContentNode.swift */; }; + D0F69E3A1D6B8B030046BCD6 /* ChatMessageReplyInfoNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E281D6B8B030046BCD6 /* ChatMessageReplyInfoNode.swift */; }; + D0F69E3B1D6B8B030046BCD6 /* ChatMessageStickerItemNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E291D6B8B030046BCD6 /* ChatMessageStickerItemNode.swift */; }; + D0F69E3C1D6B8B030046BCD6 /* ChatMessageTextBubbleContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E2A1D6B8B030046BCD6 /* ChatMessageTextBubbleContentNode.swift */; }; + D0F69E3D1D6B8B030046BCD6 /* ChatMessageWebpageBubbleContentNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E2B1D6B8B030046BCD6 /* ChatMessageWebpageBubbleContentNode.swift */; }; + D0F69E3E1D6B8B030046BCD6 /* ChatUnreadItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E2C1D6B8B030046BCD6 /* ChatUnreadItem.swift */; }; + D0F69E421D6B8B7E0046BCD6 /* ChatInputView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E401D6B8B7E0046BCD6 /* ChatInputView.swift */; }; + D0F69E431D6B8B7E0046BCD6 /* ResizeableTextInputView.swift in Sources */ = {isa = PBXBuildFile; 
fileRef = D0F69E411D6B8B7E0046BCD6 /* ResizeableTextInputView.swift */; }; + D0F69E461D6B8B950046BCD6 /* ChatHistoryNavigationButtonNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E451D6B8B950046BCD6 /* ChatHistoryNavigationButtonNode.swift */; }; + D0F69E491D6B8BAC0046BCD6 /* ActionSheetRollImageItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E481D6B8BAC0046BCD6 /* ActionSheetRollImageItem.swift */; }; + D0F69E4C1D6B8BB20046BCD6 /* ChatMediaActionSheetController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E4A1D6B8BB20046BCD6 /* ChatMediaActionSheetController.swift */; }; + D0F69E4D1D6B8BB20046BCD6 /* ChatMediaActionSheetRollItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E4B1D6B8BB20046BCD6 /* ChatMediaActionSheetRollItem.swift */; }; + D0F69E551D6B8BDA0046BCD6 /* GalleryController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E501D6B8BDA0046BCD6 /* GalleryController.swift */; }; + D0F69E561D6B8BDA0046BCD6 /* GalleryControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E511D6B8BDA0046BCD6 /* GalleryControllerNode.swift */; }; + D0F69E571D6B8BDA0046BCD6 /* GalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E521D6B8BDA0046BCD6 /* GalleryItem.swift */; }; + D0F69E581D6B8BDA0046BCD6 /* GalleryItemNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E531D6B8BDA0046BCD6 /* GalleryItemNode.swift */; }; + D0F69E591D6B8BDA0046BCD6 /* GalleryPagerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E541D6B8BDA0046BCD6 /* GalleryPagerNode.swift */; }; + D0F69E611D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5B1D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift */; }; + D0F69E621D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5C1D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift */; }; + D0F69E631D6B8BF90046BCD6 /* ChatImageGalleryItem.swift in Sources 
*/ = {isa = PBXBuildFile; fileRef = D0F69E5D1D6B8BF90046BCD6 /* ChatImageGalleryItem.swift */; }; + D0F69E641D6B8BF90046BCD6 /* ChatVideoGalleryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5E1D6B8BF90046BCD6 /* ChatVideoGalleryItem.swift */; }; + D0F69E651D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E5F1D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift */; }; + D0F69E661D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E601D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift */; }; + D0F69E6A1D6B8C160046BCD6 /* MapInputController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E681D6B8C160046BCD6 /* MapInputController.swift */; }; + D0F69E6B1D6B8C160046BCD6 /* MapInputControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E691D6B8C160046BCD6 /* MapInputControllerNode.swift */; }; + D0F69E731D6B8C340046BCD6 /* ContactsController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E6D1D6B8C340046BCD6 /* ContactsController.swift */; }; + D0F69E741D6B8C340046BCD6 /* ContactsControllerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E6E1D6B8C340046BCD6 /* ContactsControllerNode.swift */; }; + D0F69E751D6B8C340046BCD6 /* ContactsPeerItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E6F1D6B8C340046BCD6 /* ContactsPeerItem.swift */; }; + D0F69E761D6B8C340046BCD6 /* ContactsSearchContainerNode.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E701D6B8C340046BCD6 /* ContactsSearchContainerNode.swift */; }; + D0F69E771D6B8C340046BCD6 /* ContactsSectionHeaderAccessoryItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E711D6B8C340046BCD6 /* ContactsSectionHeaderAccessoryItem.swift */; }; + D0F69E781D6B8C340046BCD6 /* ContactsVCardItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E721D6B8C340046BCD6 /* ContactsVCardItem.swift */; }; + 
D0F69E7C1D6B8C470046BCD6 /* SettingsAccountInfoItem.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E7A1D6B8C470046BCD6 /* SettingsAccountInfoItem.swift */; }; + D0F69E7D1D6B8C470046BCD6 /* SettingsController.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E7B1D6B8C470046BCD6 /* SettingsController.swift */; }; + D0F69E881D6B8C850046BCD6 /* FastBlur.h in Headers */ = {isa = PBXBuildFile; fileRef = D0F69E7F1D6B8C850046BCD6 /* FastBlur.h */; }; + D0F69E891D6B8C850046BCD6 /* FastBlur.m in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E801D6B8C850046BCD6 /* FastBlur.m */; }; + D0F69E8A1D6B8C850046BCD6 /* FFMpegSwResample.h in Headers */ = {isa = PBXBuildFile; fileRef = D0F69E811D6B8C850046BCD6 /* FFMpegSwResample.h */; }; + D0F69E8B1D6B8C850046BCD6 /* FFMpegSwResample.m in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E821D6B8C850046BCD6 /* FFMpegSwResample.m */; }; + D0F69E8C1D6B8C850046BCD6 /* FrameworkBundle.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E831D6B8C850046BCD6 /* FrameworkBundle.swift */; }; + D0F69E8D1D6B8C850046BCD6 /* Localizable.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E841D6B8C850046BCD6 /* Localizable.swift */; }; + D0F69E8E1D6B8C850046BCD6 /* RingBuffer.h in Headers */ = {isa = PBXBuildFile; fileRef = D0F69E851D6B8C850046BCD6 /* RingBuffer.h */; }; + D0F69E8F1D6B8C850046BCD6 /* RingBuffer.m in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E861D6B8C850046BCD6 /* RingBuffer.m */; }; + D0F69E901D6B8C850046BCD6 /* RingByteBuffer.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E871D6B8C850046BCD6 /* RingByteBuffer.swift */; }; + D0F69E951D6B8C9B0046BCD6 /* ImageRepresentationsUtils.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E921D6B8C9B0046BCD6 /* ImageRepresentationsUtils.swift */; }; + D0F69E961D6B8C9B0046BCD6 /* ProgressiveImage.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E931D6B8C9B0046BCD6 /* ProgressiveImage.swift */; }; + D0F69E971D6B8C9B0046BCD6 /* 
WebP.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E941D6B8C9B0046BCD6 /* WebP.swift */; }; + D0F69E9A1D6B8D200046BCD6 /* UIImage+WebP.h in Headers */ = {isa = PBXBuildFile; fileRef = D0F69E981D6B8D200046BCD6 /* UIImage+WebP.h */; }; + D0F69E9B1D6B8D200046BCD6 /* UIImage+WebP.m in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E991D6B8D200046BCD6 /* UIImage+WebP.m */; }; + D0F69E9C1D6B8D520046BCD6 /* TelegramCore.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D08D452D1D5E340300A7428A /* TelegramCore.framework */; }; + D0F69EA11D6B8E380046BCD6 /* FileResources.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E9E1D6B8E380046BCD6 /* FileResources.swift */; }; + D0F69EA21D6B8E380046BCD6 /* PhotoResources.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69E9F1D6B8E380046BCD6 /* PhotoResources.swift */; }; + D0F69EA31D6B8E380046BCD6 /* StickerResources.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0F69EA01D6B8E380046BCD6 /* StickerResources.swift */; }; + D0F69EA71D6B9BBC0046BCD6 /* libwebp.a in Frameworks */ = {isa = PBXBuildFile; fileRef = D0F69EA61D6B9BBC0046BCD6 /* libwebp.a */; }; + D0F69EAC1D6B9BCB0046BCD6 /* libavcodec.a in Frameworks */ = {isa = PBXBuildFile; fileRef = D0F69EA81D6B9BCB0046BCD6 /* libavcodec.a */; }; + D0F69EAD1D6B9BCB0046BCD6 /* libavformat.a in Frameworks */ = {isa = PBXBuildFile; fileRef = D0F69EA91D6B9BCB0046BCD6 /* libavformat.a */; }; + D0F69EAE1D6B9BCB0046BCD6 /* libavutil.a in Frameworks */ = {isa = PBXBuildFile; fileRef = D0F69EAA1D6B9BCB0046BCD6 /* libavutil.a */; }; + D0F69EAF1D6B9BCB0046BCD6 /* libswresample.a in Frameworks */ = {isa = PBXBuildFile; fileRef = D0F69EAB1D6B9BCB0046BCD6 /* libswresample.a */; }; D0FC40891D5B8E7500261D9D /* TelegramUI.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = D0FC407F1D5B8E7400261D9D /* TelegramUI.framework */; }; D0FC408E1D5B8E7500261D9D /* TelegramUITests.swift in Sources */ = {isa = PBXBuildFile; fileRef = D0FC408D1D5B8E7500261D9D /* 
TelegramUITests.swift */; }; D0FC40901D5B8E7500261D9D /* TelegramUI.h in Headers */ = {isa = PBXBuildFile; fileRef = D0FC40821D5B8E7400261D9D /* TelegramUI.h */; settings = {ATTRIBUTES = (Public, ); }; }; @@ -44,6 +171,135 @@ D0AB0BB61D67191C002C78E7 /* MtProtoKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = MtProtoKit.framework; path = "../../../../Library/Developer/Xcode/DerivedData/Telegram-iOS-diblohvjozhgaifjcniwdlixlilx/Build/Products/Debug-iphoneos/MtProtoKit.framework"; sourceTree = ""; }; D0AB0BB71D67191C002C78E7 /* SSignalKit.framework */ = {isa = PBXFileReference; lastKnownFileType = wrapper.framework; name = SSignalKit.framework; path = "../../../../Library/Developer/Xcode/DerivedData/Telegram-iOS-diblohvjozhgaifjcniwdlixlilx/Build/Products/Debug-iphoneos/SSignalKit.framework"; sourceTree = ""; }; D0AB0BBA1D6719B5002C78E7 /* Images.xcassets */ = {isa = PBXFileReference; lastKnownFileType = folder.assetcatalog; path = Images.xcassets; sourceTree = ""; }; + D0F69CD31D6B87D30046BCD6 /* FFMpegMediaFrameSourceContext.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegMediaFrameSourceContext.swift; sourceTree = ""; }; + D0F69CD41D6B87D30046BCD6 /* MediaPlayerAudioRenderer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaPlayerAudioRenderer.swift; sourceTree = ""; }; + D0F69CD61D6B87D30046BCD6 /* MediaManager.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaManager.swift; sourceTree = ""; }; + D0F69CD71D6B87D30046BCD6 /* FFMpegAudioFrameDecoder.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegAudioFrameDecoder.swift; sourceTree = ""; }; + D0F69CDC1D6B87D30046BCD6 /* MediaPlayerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaPlayerNode.swift; 
sourceTree = ""; }; + D0F69CDE1D6B87D30046BCD6 /* PeerAvatar.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = PeerAvatar.swift; sourceTree = ""; }; + D0F69CE11D6B87D30046BCD6 /* FFMpegMediaFrameSource.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegMediaFrameSource.swift; sourceTree = ""; }; + D0F69CE51D6B87D30046BCD6 /* MediaFrameSource.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaFrameSource.swift; sourceTree = ""; }; + D0F69CFB1D6B87D30046BCD6 /* TouchDownGestureRecognizer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TouchDownGestureRecognizer.swift; sourceTree = ""; }; + D0F69D021D6B87D30046BCD6 /* MediaPlayer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaPlayer.swift; sourceTree = ""; }; + D0F69D161D6B87D30046BCD6 /* FFMpegMediaFrameSourceContextHelpers.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegMediaFrameSourceContextHelpers.swift; sourceTree = ""; }; + D0F69D171D6B87D30046BCD6 /* FFMpegPacket.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegPacket.swift; sourceTree = ""; }; + D0F69D1D1D6B87D30046BCD6 /* MediaTrackDecodableFrame.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaTrackDecodableFrame.swift; sourceTree = ""; }; + D0F69D6F1D6B87DE0046BCD6 /* FFMpegMediaPassthroughVideoFrameDecoder.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegMediaPassthroughVideoFrameDecoder.swift; sourceTree = ""; }; + D0F69D701D6B87DE0046BCD6 /* MediaTrackFrameBuffer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = 
MediaTrackFrameBuffer.swift; sourceTree = ""; }; + D0F69D711D6B87DE0046BCD6 /* MediaTrackFrame.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaTrackFrame.swift; sourceTree = ""; }; + D0F69D7F1D6B87EC0046BCD6 /* MediaPlaybackData.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaPlaybackData.swift; sourceTree = ""; }; + D0F69D871D6B87EC0046BCD6 /* FFMpegMediaVideoFrameDecoder.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FFMpegMediaVideoFrameDecoder.swift; sourceTree = ""; }; + D0F69D881D6B87EC0046BCD6 /* MediaTrackFrameDecoder.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MediaTrackFrameDecoder.swift; sourceTree = ""; }; + D0F69D901D6B87EC0046BCD6 /* Cache.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Cache.swift; sourceTree = ""; }; + D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; name = TelegramUI.xcconfig; path = TelegramUI/Config/TelegramUI.xcconfig; sourceTree = ""; }; + D0F69DC01D6B89D30046BCD6 /* ListSectionHeaderNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListSectionHeaderNode.swift; sourceTree = ""; }; + D0F69DC21D6B89DA0046BCD6 /* TextNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TextNode.swift; sourceTree = ""; }; + D0F69DC41D6B89E10046BCD6 /* RadialProgressNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RadialProgressNode.swift; sourceTree = ""; }; + D0F69DC61D6B89E70046BCD6 /* TransformImageNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = TransformImageNode.swift; sourceTree = ""; 
}; + D0F69DC81D6B89EB0046BCD6 /* ImageNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ImageNode.swift; sourceTree = ""; }; + D0F69DCB1D6B8A0D0046BCD6 /* SearchBarNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SearchBarNode.swift; sourceTree = ""; }; + D0F69DCC1D6B8A0D0046BCD6 /* SearchBarPlaceholderNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SearchBarPlaceholderNode.swift; sourceTree = ""; }; + D0F69DCD1D6B8A0D0046BCD6 /* SearchDisplayController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SearchDisplayController.swift; sourceTree = ""; }; + D0F69DCE1D6B8A0D0046BCD6 /* SearchDisplayControllerContentNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SearchDisplayControllerContentNode.swift; sourceTree = ""; }; + D0F69DD51D6B8A2D0046BCD6 /* AlertController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AlertController.swift; sourceTree = ""; }; + D0F69DD81D6B8A420046BCD6 /* ListController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListController.swift; sourceTree = ""; }; + D0F69DD91D6B8A420046BCD6 /* ListControllerButtonItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListControllerButtonItem.swift; sourceTree = ""; }; + D0F69DDA1D6B8A420046BCD6 /* ListControllerDisclosureActionItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListControllerDisclosureActionItem.swift; sourceTree = ""; }; + D0F69DDB1D6B8A420046BCD6 /* ListControllerGroupableItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListControllerGroupableItem.swift; 
sourceTree = ""; }; + D0F69DDC1D6B8A420046BCD6 /* ListControllerItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListControllerItem.swift; sourceTree = ""; }; + D0F69DDD1D6B8A420046BCD6 /* ListControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListControllerNode.swift; sourceTree = ""; }; + D0F69DDE1D6B8A420046BCD6 /* ListControllerSpacerItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ListControllerSpacerItem.swift; sourceTree = ""; }; + D0F69DE81D6B8A6C0046BCD6 /* AuthorizationCodeController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationCodeController.swift; sourceTree = ""; }; + D0F69DE91D6B8A6C0046BCD6 /* AuthorizationCodeControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationCodeControllerNode.swift; sourceTree = ""; }; + D0F69DEA1D6B8A6C0046BCD6 /* AuthorizationController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationController.swift; sourceTree = ""; }; + D0F69DEB1D6B8A6C0046BCD6 /* AuthorizationPasswordController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationPasswordController.swift; sourceTree = ""; }; + D0F69DEC1D6B8A6C0046BCD6 /* AuthorizationPasswordControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationPasswordControllerNode.swift; sourceTree = ""; }; + D0F69DED1D6B8A6C0046BCD6 /* AuthorizationPhoneController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationPhoneController.swift; sourceTree = ""; }; + D0F69DEE1D6B8A6C0046BCD6 /* AuthorizationPhoneControllerNode.swift */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = AuthorizationPhoneControllerNode.swift; sourceTree = ""; }; + D0F69DF71D6B8A880046BCD6 /* ChatListAvatarNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListAvatarNode.swift; sourceTree = ""; }; + D0F69DF81D6B8A880046BCD6 /* ChatListController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListController.swift; sourceTree = ""; }; + D0F69DF91D6B8A880046BCD6 /* ChatListControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListControllerNode.swift; sourceTree = ""; }; + D0F69DFA1D6B8A880046BCD6 /* ChatListEmptyItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListEmptyItem.swift; sourceTree = ""; }; + D0F69DFB1D6B8A880046BCD6 /* ChatListHoleItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListHoleItem.swift; sourceTree = ""; }; + D0F69DFC1D6B8A880046BCD6 /* ChatListItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListItem.swift; sourceTree = ""; }; + D0F69DFD1D6B8A880046BCD6 /* ChatListSearchItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListSearchItem.swift; sourceTree = ""; }; + D0F69E071D6B8A9C0046BCD6 /* ChatListSearchContainerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListSearchContainerNode.swift; sourceTree = ""; }; + D0F69E091D6B8AA60046BCD6 /* ChatListSearchRecentPeersNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatListSearchRecentPeersNode.swift; sourceTree = ""; }; + D0F69E0B1D6B8AB10046BCD6 /* HorizontalPeerItem.swift */ = {isa = 
PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = HorizontalPeerItem.swift; sourceTree = ""; }; + D0F69E0E1D6B8ACF0046BCD6 /* ChatController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatController.swift; sourceTree = ""; }; + D0F69E0F1D6B8ACF0046BCD6 /* ChatControllerInteraction.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatControllerInteraction.swift; sourceTree = ""; }; + D0F69E101D6B8ACF0046BCD6 /* ChatControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatControllerNode.swift; sourceTree = ""; }; + D0F69E111D6B8ACF0046BCD6 /* ChatHistoryEntry.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatHistoryEntry.swift; sourceTree = ""; }; + D0F69E121D6B8ACF0046BCD6 /* ChatHistoryLocation.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatHistoryLocation.swift; sourceTree = ""; }; + D0F69E191D6B8AE60046BCD6 /* ChatHoleItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatHoleItem.swift; sourceTree = ""; }; + D0F69E1B1D6B8B030046BCD6 /* ChatMessageActionItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageActionItemNode.swift; sourceTree = ""; }; + D0F69E1C1D6B8B030046BCD6 /* ChatMessageAvatarAccessoryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageAvatarAccessoryItem.swift; sourceTree = ""; }; + D0F69E1D1D6B8B030046BCD6 /* ChatMessageBubbleContentCalclulateImageCorners.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageBubbleContentCalclulateImageCorners.swift; sourceTree = ""; }; + D0F69E1E1D6B8B030046BCD6 /* 
ChatMessageBubbleContentNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageBubbleContentNode.swift; sourceTree = ""; }; + D0F69E1F1D6B8B030046BCD6 /* ChatMessageBubbleItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageBubbleItemNode.swift; sourceTree = ""; }; + D0F69E201D6B8B030046BCD6 /* ChatMessageDateAndStatusNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageDateAndStatusNode.swift; sourceTree = ""; }; + D0F69E211D6B8B030046BCD6 /* ChatMessageFileBubbleContentNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageFileBubbleContentNode.swift; sourceTree = ""; }; + D0F69E221D6B8B030046BCD6 /* ChatMessageForwardInfoNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageForwardInfoNode.swift; sourceTree = ""; }; + D0F69E231D6B8B030046BCD6 /* ChatMessageInteractiveFileNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageInteractiveFileNode.swift; sourceTree = ""; }; + D0F69E241D6B8B030046BCD6 /* ChatMessageInteractiveMediaNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageInteractiveMediaNode.swift; sourceTree = ""; }; + D0F69E251D6B8B030046BCD6 /* ChatMessageItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageItem.swift; sourceTree = ""; }; + D0F69E261D6B8B030046BCD6 /* ChatMessageItemView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageItemView.swift; sourceTree = ""; }; + D0F69E271D6B8B030046BCD6 /* ChatMessageMediaBubbleContentNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.swift; path = ChatMessageMediaBubbleContentNode.swift; sourceTree = ""; }; + D0F69E281D6B8B030046BCD6 /* ChatMessageReplyInfoNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageReplyInfoNode.swift; sourceTree = ""; }; + D0F69E291D6B8B030046BCD6 /* ChatMessageStickerItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageStickerItemNode.swift; sourceTree = ""; }; + D0F69E2A1D6B8B030046BCD6 /* ChatMessageTextBubbleContentNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageTextBubbleContentNode.swift; sourceTree = ""; }; + D0F69E2B1D6B8B030046BCD6 /* ChatMessageWebpageBubbleContentNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMessageWebpageBubbleContentNode.swift; sourceTree = ""; }; + D0F69E2C1D6B8B030046BCD6 /* ChatUnreadItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatUnreadItem.swift; sourceTree = ""; }; + D0F69E401D6B8B7E0046BCD6 /* ChatInputView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatInputView.swift; sourceTree = ""; }; + D0F69E411D6B8B7E0046BCD6 /* ResizeableTextInputView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ResizeableTextInputView.swift; sourceTree = ""; }; + D0F69E451D6B8B950046BCD6 /* ChatHistoryNavigationButtonNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatHistoryNavigationButtonNode.swift; sourceTree = ""; }; + D0F69E481D6B8BAC0046BCD6 /* ActionSheetRollImageItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ActionSheetRollImageItem.swift; sourceTree = ""; }; + D0F69E4A1D6B8BB20046BCD6 /* 
ChatMediaActionSheetController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMediaActionSheetController.swift; sourceTree = ""; }; + D0F69E4B1D6B8BB20046BCD6 /* ChatMediaActionSheetRollItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatMediaActionSheetRollItem.swift; sourceTree = ""; }; + D0F69E501D6B8BDA0046BCD6 /* GalleryController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GalleryController.swift; sourceTree = ""; }; + D0F69E511D6B8BDA0046BCD6 /* GalleryControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GalleryControllerNode.swift; sourceTree = ""; }; + D0F69E521D6B8BDA0046BCD6 /* GalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GalleryItem.swift; sourceTree = ""; }; + D0F69E531D6B8BDA0046BCD6 /* GalleryItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GalleryItemNode.swift; sourceTree = ""; }; + D0F69E541D6B8BDA0046BCD6 /* GalleryPagerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = GalleryPagerNode.swift; sourceTree = ""; }; + D0F69E5B1D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatDocumentGalleryItem.swift; sourceTree = ""; }; + D0F69E5C1D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatHoleGalleryItem.swift; sourceTree = ""; }; + D0F69E5D1D6B8BF90046BCD6 /* ChatImageGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatImageGalleryItem.swift; sourceTree = ""; }; + D0F69E5E1D6B8BF90046BCD6 /* 
ChatVideoGalleryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatVideoGalleryItem.swift; sourceTree = ""; }; + D0F69E5F1D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ChatVideoGalleryItemScrubberView.swift; sourceTree = ""; }; + D0F69E601D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ZoomableContentGalleryItemNode.swift; sourceTree = ""; }; + D0F69E681D6B8C160046BCD6 /* MapInputController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MapInputController.swift; sourceTree = ""; }; + D0F69E691D6B8C160046BCD6 /* MapInputControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = MapInputControllerNode.swift; sourceTree = ""; }; + D0F69E6D1D6B8C340046BCD6 /* ContactsController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ContactsController.swift; sourceTree = ""; }; + D0F69E6E1D6B8C340046BCD6 /* ContactsControllerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ContactsControllerNode.swift; sourceTree = ""; }; + D0F69E6F1D6B8C340046BCD6 /* ContactsPeerItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ContactsPeerItem.swift; sourceTree = ""; }; + D0F69E701D6B8C340046BCD6 /* ContactsSearchContainerNode.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ContactsSearchContainerNode.swift; sourceTree = ""; }; + D0F69E711D6B8C340046BCD6 /* ContactsSectionHeaderAccessoryItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = 
ContactsSectionHeaderAccessoryItem.swift; sourceTree = ""; }; + D0F69E721D6B8C340046BCD6 /* ContactsVCardItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ContactsVCardItem.swift; sourceTree = ""; }; + D0F69E7A1D6B8C470046BCD6 /* SettingsAccountInfoItem.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SettingsAccountInfoItem.swift; sourceTree = ""; }; + D0F69E7B1D6B8C470046BCD6 /* SettingsController.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = SettingsController.swift; sourceTree = ""; }; + D0F69E7F1D6B8C850046BCD6 /* FastBlur.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FastBlur.h; sourceTree = ""; }; + D0F69E801D6B8C850046BCD6 /* FastBlur.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FastBlur.m; sourceTree = ""; }; + D0F69E811D6B8C850046BCD6 /* FFMpegSwResample.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = FFMpegSwResample.h; sourceTree = ""; }; + D0F69E821D6B8C850046BCD6 /* FFMpegSwResample.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = FFMpegSwResample.m; sourceTree = ""; }; + D0F69E831D6B8C850046BCD6 /* FrameworkBundle.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FrameworkBundle.swift; sourceTree = ""; }; + D0F69E841D6B8C850046BCD6 /* Localizable.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = Localizable.swift; sourceTree = ""; }; + D0F69E851D6B8C850046BCD6 /* RingBuffer.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = RingBuffer.h; sourceTree = ""; }; + D0F69E861D6B8C850046BCD6 /* RingBuffer.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.objc; path = RingBuffer.m; sourceTree = ""; }; + D0F69E871D6B8C850046BCD6 /* RingByteBuffer.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = RingByteBuffer.swift; sourceTree = ""; }; + D0F69E921D6B8C9B0046BCD6 /* ImageRepresentationsUtils.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ImageRepresentationsUtils.swift; sourceTree = ""; }; + D0F69E931D6B8C9B0046BCD6 /* ProgressiveImage.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = ProgressiveImage.swift; sourceTree = ""; }; + D0F69E941D6B8C9B0046BCD6 /* WebP.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = WebP.swift; sourceTree = ""; }; + D0F69E981D6B8D200046BCD6 /* UIImage+WebP.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "UIImage+WebP.h"; sourceTree = ""; }; + D0F69E991D6B8D200046BCD6 /* UIImage+WebP.m */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.objc; path = "UIImage+WebP.m"; sourceTree = ""; }; + D0F69E9E1D6B8E380046BCD6 /* FileResources.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = FileResources.swift; sourceTree = ""; }; + D0F69E9F1D6B8E380046BCD6 /* PhotoResources.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = PhotoResources.swift; sourceTree = ""; }; + D0F69EA01D6B8E380046BCD6 /* StickerResources.swift */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.swift; path = StickerResources.swift; sourceTree = ""; }; + D0F69EA51D6B8F3E0046BCD6 /* TelegramUIIncludes.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TelegramUIIncludes.h; sourceTree = ""; }; + D0F69EA61D6B9BBC0046BCD6 /* libwebp.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = 
libwebp.a; path = "third-party/libwebp/lib/libwebp.a"; sourceTree = ""; }; + D0F69EA81D6B9BCB0046BCD6 /* libavcodec.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavcodec.a; path = "third-party/FFmpeg-iOS/lib/libavcodec.a"; sourceTree = ""; }; + D0F69EA91D6B9BCB0046BCD6 /* libavformat.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavformat.a; path = "third-party/FFmpeg-iOS/lib/libavformat.a"; sourceTree = ""; }; + D0F69EAA1D6B9BCB0046BCD6 /* libavutil.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libavutil.a; path = "third-party/FFmpeg-iOS/lib/libavutil.a"; sourceTree = ""; }; + D0F69EAB1D6B9BCB0046BCD6 /* libswresample.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libswresample.a; path = "third-party/FFmpeg-iOS/lib/libswresample.a"; sourceTree = ""; }; D0FC407F1D5B8E7400261D9D /* TelegramUI.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = TelegramUI.framework; sourceTree = BUILT_PRODUCTS_DIR; }; D0FC40821D5B8E7400261D9D /* TelegramUI.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = TelegramUI.h; sourceTree = ""; }; D0FC40831D5B8E7400261D9D /* Info.plist */ = {isa = PBXFileReference; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = ""; }; @@ -57,8 +313,12 @@ isa = PBXFrameworksBuildPhase; buildActionMask = 2147483647; files = ( - D0AB0BB81D67191C002C78E7 /* MtProtoKit.framework in Frameworks */, - D0AB0BB91D67191C002C78E7 /* SSignalKit.framework in Frameworks */, + D0F69EAC1D6B9BCB0046BCD6 /* libavcodec.a in Frameworks */, + D0F69EAD1D6B9BCB0046BCD6 /* libavformat.a in Frameworks */, + D0F69EAE1D6B9BCB0046BCD6 /* libavutil.a in Frameworks */, + D0F69EAF1D6B9BCB0046BCD6 /* libswresample.a in Frameworks */, + D0F69EA71D6B9BBC0046BCD6 /* libwebp.a in Frameworks */, + D0F69E9C1D6B8D520046BCD6 /* TelegramCore.framework in Frameworks */, D0AB0BB51D6718F1002C78E7 /* 
CoreMedia.framework in Frameworks */, D0AB0BB31D6718EB002C78E7 /* libz.tbd in Frameworks */, D0AB0BB11D6718DA002C78E7 /* libiconv.tbd in Frameworks */, @@ -83,6 +343,11 @@ D08D45281D5E340200A7428A /* Frameworks */ = { isa = PBXGroup; children = ( + D0F69EA81D6B9BCB0046BCD6 /* libavcodec.a */, + D0F69EA91D6B9BCB0046BCD6 /* libavformat.a */, + D0F69EAA1D6B9BCB0046BCD6 /* libavutil.a */, + D0F69EAB1D6B9BCB0046BCD6 /* libswresample.a */, + D0F69EA61D6B9BBC0046BCD6 /* libwebp.a */, D0AB0BB61D67191C002C78E7 /* MtProtoKit.framework */, D0AB0BB71D67191C002C78E7 /* SSignalKit.framework */, D0AB0BB41D6718F1002C78E7 /* CoreMedia.framework */, @@ -97,9 +362,359 @@ name = Frameworks; sourceTree = ""; }; + D0F69CCE1D6B87950046BCD6 /* Files */ = { + isa = PBXGroup; + children = ( + ); + name = Files; + sourceTree = ""; + }; + D0F69DBB1D6B88330046BCD6 /* Media */ = { + isa = PBXGroup; + children = ( + D0F69CD61D6B87D30046BCD6 /* MediaManager.swift */, + D0F69CDE1D6B87D30046BCD6 /* PeerAvatar.swift */, + D0F69D901D6B87EC0046BCD6 /* Cache.swift */, + D0F69DBC1D6B886C0046BCD6 /* Player */, + D0F69E9D1D6B8E240046BCD6 /* Resources */, + ); + name = Media; + sourceTree = ""; + }; + D0F69DBC1D6B886C0046BCD6 /* Player */ = { + isa = PBXGroup; + children = ( + D0F69CE51D6B87D30046BCD6 /* MediaFrameSource.swift */, + D0F69D7F1D6B87EC0046BCD6 /* MediaPlaybackData.swift */, + D0F69D021D6B87D30046BCD6 /* MediaPlayer.swift */, + D0F69CD41D6B87D30046BCD6 /* MediaPlayerAudioRenderer.swift */, + D0F69CDC1D6B87D30046BCD6 /* MediaPlayerNode.swift */, + D0F69D1D1D6B87D30046BCD6 /* MediaTrackDecodableFrame.swift */, + D0F69D711D6B87DE0046BCD6 /* MediaTrackFrame.swift */, + D0F69D701D6B87DE0046BCD6 /* MediaTrackFrameBuffer.swift */, + D0F69D881D6B87EC0046BCD6 /* MediaTrackFrameDecoder.swift */, + D0F69CD71D6B87D30046BCD6 /* FFMpegAudioFrameDecoder.swift */, + D0F69CE11D6B87D30046BCD6 /* FFMpegMediaFrameSource.swift */, + D0F69CD31D6B87D30046BCD6 /* FFMpegMediaFrameSourceContext.swift */, + 
D0F69D161D6B87D30046BCD6 /* FFMpegMediaFrameSourceContextHelpers.swift */, + D0F69D871D6B87EC0046BCD6 /* FFMpegMediaVideoFrameDecoder.swift */, + D0F69D6F1D6B87DE0046BCD6 /* FFMpegMediaPassthroughVideoFrameDecoder.swift */, + D0F69D171D6B87D30046BCD6 /* FFMpegPacket.swift */, + ); + name = Player; + sourceTree = ""; + }; + D0F69DBD1D6B897A0046BCD6 /* Components */ = { + isa = PBXGroup; + children = ( + D0F69DBE1D6B89880046BCD6 /* Gestures */, + D0F69DBF1D6B89AE0046BCD6 /* Nodes */, + D0F69DD31D6B8A160046BCD6 /* Controllers */, + ); + name = Components; + sourceTree = ""; + }; + D0F69DBE1D6B89880046BCD6 /* Gestures */ = { + isa = PBXGroup; + children = ( + D0F69CFB1D6B87D30046BCD6 /* TouchDownGestureRecognizer.swift */, + ); + name = Gestures; + sourceTree = ""; + }; + D0F69DBF1D6B89AE0046BCD6 /* Nodes */ = { + isa = PBXGroup; + children = ( + D0F69DC81D6B89EB0046BCD6 /* ImageNode.swift */, + D0F69DC61D6B89E70046BCD6 /* TransformImageNode.swift */, + D0F69DC41D6B89E10046BCD6 /* RadialProgressNode.swift */, + D0F69DC21D6B89DA0046BCD6 /* TextNode.swift */, + D0F69DC01D6B89D30046BCD6 /* ListSectionHeaderNode.swift */, + D0F69DCA1D6B89F20046BCD6 /* Search */, + ); + name = Nodes; + sourceTree = ""; + }; + D0F69DCA1D6B89F20046BCD6 /* Search */ = { + isa = PBXGroup; + children = ( + D0F69DCB1D6B8A0D0046BCD6 /* SearchBarNode.swift */, + D0F69DCC1D6B8A0D0046BCD6 /* SearchBarPlaceholderNode.swift */, + D0F69DCD1D6B8A0D0046BCD6 /* SearchDisplayController.swift */, + D0F69DCE1D6B8A0D0046BCD6 /* SearchDisplayControllerContentNode.swift */, + ); + name = Search; + sourceTree = ""; + }; + D0F69DD31D6B8A160046BCD6 /* Controllers */ = { + isa = PBXGroup; + children = ( + D0F69DD71D6B8A300046BCD6 /* List */, + D0F69DD41D6B8A240046BCD6 /* Alert */, + ); + name = Controllers; + sourceTree = ""; + }; + D0F69DD41D6B8A240046BCD6 /* Alert */ = { + isa = PBXGroup; + children = ( + D0F69DD51D6B8A2D0046BCD6 /* AlertController.swift */, + ); + name = Alert; + sourceTree = ""; + }; + 
D0F69DD71D6B8A300046BCD6 /* List */ = { + isa = PBXGroup; + children = ( + D0F69DD81D6B8A420046BCD6 /* ListController.swift */, + D0F69DD91D6B8A420046BCD6 /* ListControllerButtonItem.swift */, + D0F69DDA1D6B8A420046BCD6 /* ListControllerDisclosureActionItem.swift */, + D0F69DDB1D6B8A420046BCD6 /* ListControllerGroupableItem.swift */, + D0F69DDC1D6B8A420046BCD6 /* ListControllerItem.swift */, + D0F69DDD1D6B8A420046BCD6 /* ListControllerNode.swift */, + D0F69DDE1D6B8A420046BCD6 /* ListControllerSpacerItem.swift */, + ); + name = List; + sourceTree = ""; + }; + D0F69DE61D6B8A4E0046BCD6 /* Controllers */ = { + isa = PBXGroup; + children = ( + D0F69DE71D6B8A590046BCD6 /* Authorization */, + D0F69DF61D6B8A720046BCD6 /* Chat List */, + D0F69E0D1D6B8AB90046BCD6 /* Chat */, + D0F69E4E1D6B8BB90046BCD6 /* Media */, + D0F69E6C1D6B8C220046BCD6 /* Contacts */, + D0F69E791D6B8C3B0046BCD6 /* Settings */, + ); + name = Controllers; + sourceTree = ""; + }; + D0F69DE71D6B8A590046BCD6 /* Authorization */ = { + isa = PBXGroup; + children = ( + D0F69DE81D6B8A6C0046BCD6 /* AuthorizationCodeController.swift */, + D0F69DE91D6B8A6C0046BCD6 /* AuthorizationCodeControllerNode.swift */, + D0F69DEA1D6B8A6C0046BCD6 /* AuthorizationController.swift */, + D0F69DEB1D6B8A6C0046BCD6 /* AuthorizationPasswordController.swift */, + D0F69DEC1D6B8A6C0046BCD6 /* AuthorizationPasswordControllerNode.swift */, + D0F69DED1D6B8A6C0046BCD6 /* AuthorizationPhoneController.swift */, + D0F69DEE1D6B8A6C0046BCD6 /* AuthorizationPhoneControllerNode.swift */, + ); + name = Authorization; + sourceTree = ""; + }; + D0F69DF61D6B8A720046BCD6 /* Chat List */ = { + isa = PBXGroup; + children = ( + D0F69DF71D6B8A880046BCD6 /* ChatListAvatarNode.swift */, + D0F69DF81D6B8A880046BCD6 /* ChatListController.swift */, + D0F69DF91D6B8A880046BCD6 /* ChatListControllerNode.swift */, + D0F69DFA1D6B8A880046BCD6 /* ChatListEmptyItem.swift */, + D0F69DFB1D6B8A880046BCD6 /* ChatListHoleItem.swift */, + D0F69DFC1D6B8A880046BCD6 /* 
ChatListItem.swift */, + D0F69DFD1D6B8A880046BCD6 /* ChatListSearchItem.swift */, + D0F69E051D6B8A8B0046BCD6 /* Search */, + ); + name = "Chat List"; + sourceTree = ""; + }; + D0F69E051D6B8A8B0046BCD6 /* Search */ = { + isa = PBXGroup; + children = ( + D0F69E071D6B8A9C0046BCD6 /* ChatListSearchContainerNode.swift */, + D0F69E061D6B8A930046BCD6 /* Recent Peers */, + ); + name = Search; + sourceTree = ""; + }; + D0F69E061D6B8A930046BCD6 /* Recent Peers */ = { + isa = PBXGroup; + children = ( + D0F69E0B1D6B8AB10046BCD6 /* HorizontalPeerItem.swift */, + D0F69E091D6B8AA60046BCD6 /* ChatListSearchRecentPeersNode.swift */, + ); + name = "Recent Peers"; + sourceTree = ""; + }; + D0F69E0D1D6B8AB90046BCD6 /* Chat */ = { + isa = PBXGroup; + children = ( + D0F69E0E1D6B8ACF0046BCD6 /* ChatController.swift */, + D0F69E0F1D6B8ACF0046BCD6 /* ChatControllerInteraction.swift */, + D0F69E101D6B8ACF0046BCD6 /* ChatControllerNode.swift */, + D0F69E111D6B8ACF0046BCD6 /* ChatHistoryEntry.swift */, + D0F69E121D6B8ACF0046BCD6 /* ChatHistoryLocation.swift */, + D0F69E181D6B8AD10046BCD6 /* Items */, + D0F69E3F1D6B8B6B0046BCD6 /* Input Panel */, + D0F69E441D6B8B850046BCD6 /* History Navigation */, + D0F69E471D6B8B9A0046BCD6 /* Input Media Action Sheet */, + ); + name = Chat; + sourceTree = ""; + }; + D0F69E181D6B8AD10046BCD6 /* Items */ = { + isa = PBXGroup; + children = ( + D0F69E1B1D6B8B030046BCD6 /* ChatMessageActionItemNode.swift */, + D0F69E1C1D6B8B030046BCD6 /* ChatMessageAvatarAccessoryItem.swift */, + D0F69E1D1D6B8B030046BCD6 /* ChatMessageBubbleContentCalclulateImageCorners.swift */, + D0F69E1E1D6B8B030046BCD6 /* ChatMessageBubbleContentNode.swift */, + D0F69E1F1D6B8B030046BCD6 /* ChatMessageBubbleItemNode.swift */, + D0F69E201D6B8B030046BCD6 /* ChatMessageDateAndStatusNode.swift */, + D0F69E211D6B8B030046BCD6 /* ChatMessageFileBubbleContentNode.swift */, + D0F69E221D6B8B030046BCD6 /* ChatMessageForwardInfoNode.swift */, + D0F69E231D6B8B030046BCD6 /* 
ChatMessageInteractiveFileNode.swift */, + D0F69E241D6B8B030046BCD6 /* ChatMessageInteractiveMediaNode.swift */, + D0F69E251D6B8B030046BCD6 /* ChatMessageItem.swift */, + D0F69E261D6B8B030046BCD6 /* ChatMessageItemView.swift */, + D0F69E271D6B8B030046BCD6 /* ChatMessageMediaBubbleContentNode.swift */, + D0F69E281D6B8B030046BCD6 /* ChatMessageReplyInfoNode.swift */, + D0F69E291D6B8B030046BCD6 /* ChatMessageStickerItemNode.swift */, + D0F69E2A1D6B8B030046BCD6 /* ChatMessageTextBubbleContentNode.swift */, + D0F69E2B1D6B8B030046BCD6 /* ChatMessageWebpageBubbleContentNode.swift */, + D0F69E2C1D6B8B030046BCD6 /* ChatUnreadItem.swift */, + D0F69E191D6B8AE60046BCD6 /* ChatHoleItem.swift */, + ); + name = Items; + sourceTree = ""; + }; + D0F69E3F1D6B8B6B0046BCD6 /* Input Panel */ = { + isa = PBXGroup; + children = ( + D0F69E401D6B8B7E0046BCD6 /* ChatInputView.swift */, + D0F69E411D6B8B7E0046BCD6 /* ResizeableTextInputView.swift */, + ); + name = "Input Panel"; + sourceTree = ""; + }; + D0F69E441D6B8B850046BCD6 /* History Navigation */ = { + isa = PBXGroup; + children = ( + D0F69E451D6B8B950046BCD6 /* ChatHistoryNavigationButtonNode.swift */, + ); + name = "History Navigation"; + sourceTree = ""; + }; + D0F69E471D6B8B9A0046BCD6 /* Input Media Action Sheet */ = { + isa = PBXGroup; + children = ( + D0F69E4A1D6B8BB20046BCD6 /* ChatMediaActionSheetController.swift */, + D0F69E4B1D6B8BB20046BCD6 /* ChatMediaActionSheetRollItem.swift */, + D0F69E481D6B8BAC0046BCD6 /* ActionSheetRollImageItem.swift */, + ); + name = "Input Media Action Sheet"; + sourceTree = ""; + }; + D0F69E4E1D6B8BB90046BCD6 /* Media */ = { + isa = PBXGroup; + children = ( + D0F69E4F1D6B8BC40046BCD6 /* Gallery */, + D0F69E671D6B8C030046BCD6 /* Map Input */, + ); + name = Media; + sourceTree = ""; + }; + D0F69E4F1D6B8BC40046BCD6 /* Gallery */ = { + isa = PBXGroup; + children = ( + D0F69E501D6B8BDA0046BCD6 /* GalleryController.swift */, + D0F69E511D6B8BDA0046BCD6 /* GalleryControllerNode.swift */, + 
D0F69E521D6B8BDA0046BCD6 /* GalleryItem.swift */, + D0F69E531D6B8BDA0046BCD6 /* GalleryItemNode.swift */, + D0F69E541D6B8BDA0046BCD6 /* GalleryPagerNode.swift */, + D0F69E5A1D6B8BDD0046BCD6 /* Items */, + ); + name = Gallery; + sourceTree = ""; + }; + D0F69E5A1D6B8BDD0046BCD6 /* Items */ = { + isa = PBXGroup; + children = ( + D0F69E5B1D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift */, + D0F69E5C1D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift */, + D0F69E5D1D6B8BF90046BCD6 /* ChatImageGalleryItem.swift */, + D0F69E5E1D6B8BF90046BCD6 /* ChatVideoGalleryItem.swift */, + D0F69E5F1D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift */, + D0F69E601D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift */, + ); + name = Items; + sourceTree = ""; + }; + D0F69E671D6B8C030046BCD6 /* Map Input */ = { + isa = PBXGroup; + children = ( + D0F69E681D6B8C160046BCD6 /* MapInputController.swift */, + D0F69E691D6B8C160046BCD6 /* MapInputControllerNode.swift */, + ); + name = "Map Input"; + sourceTree = ""; + }; + D0F69E6C1D6B8C220046BCD6 /* Contacts */ = { + isa = PBXGroup; + children = ( + D0F69E6D1D6B8C340046BCD6 /* ContactsController.swift */, + D0F69E6E1D6B8C340046BCD6 /* ContactsControllerNode.swift */, + D0F69E6F1D6B8C340046BCD6 /* ContactsPeerItem.swift */, + D0F69E701D6B8C340046BCD6 /* ContactsSearchContainerNode.swift */, + D0F69E711D6B8C340046BCD6 /* ContactsSectionHeaderAccessoryItem.swift */, + D0F69E721D6B8C340046BCD6 /* ContactsVCardItem.swift */, + ); + name = Contacts; + sourceTree = ""; + }; + D0F69E791D6B8C3B0046BCD6 /* Settings */ = { + isa = PBXGroup; + children = ( + D0F69E7A1D6B8C470046BCD6 /* SettingsAccountInfoItem.swift */, + D0F69E7B1D6B8C470046BCD6 /* SettingsController.swift */, + ); + name = Settings; + sourceTree = ""; + }; + D0F69E7E1D6B8C500046BCD6 /* Supporting Files */ = { + isa = PBXGroup; + children = ( + D0F69E981D6B8D200046BCD6 /* UIImage+WebP.h */, + D0F69E991D6B8D200046BCD6 /* UIImage+WebP.m */, + D0F69E7F1D6B8C850046BCD6 /* FastBlur.h 
*/, + D0F69E801D6B8C850046BCD6 /* FastBlur.m */, + D0F69E811D6B8C850046BCD6 /* FFMpegSwResample.h */, + D0F69E821D6B8C850046BCD6 /* FFMpegSwResample.m */, + D0F69E831D6B8C850046BCD6 /* FrameworkBundle.swift */, + D0F69E841D6B8C850046BCD6 /* Localizable.swift */, + D0F69E851D6B8C850046BCD6 /* RingBuffer.h */, + D0F69E861D6B8C850046BCD6 /* RingBuffer.m */, + D0F69E871D6B8C850046BCD6 /* RingByteBuffer.swift */, + D0F69EA51D6B8F3E0046BCD6 /* TelegramUIIncludes.h */, + ); + name = "Supporting Files"; + sourceTree = ""; + }; + D0F69E911D6B8C8E0046BCD6 /* Utils */ = { + isa = PBXGroup; + children = ( + D0F69E921D6B8C9B0046BCD6 /* ImageRepresentationsUtils.swift */, + D0F69E931D6B8C9B0046BCD6 /* ProgressiveImage.swift */, + D0F69E941D6B8C9B0046BCD6 /* WebP.swift */, + ); + name = Utils; + sourceTree = ""; + }; + D0F69E9D1D6B8E240046BCD6 /* Resources */ = { + isa = PBXGroup; + children = ( + D0F69E9E1D6B8E380046BCD6 /* FileResources.swift */, + D0F69E9F1D6B8E380046BCD6 /* PhotoResources.swift */, + D0F69EA01D6B8E380046BCD6 /* StickerResources.swift */, + ); + name = Resources; + sourceTree = ""; + }; D0FC40751D5B8E7400261D9D = { isa = PBXGroup; children = ( + D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */, D0AB0BBA1D6719B5002C78E7 /* Images.xcassets */, D0FC40811D5B8E7400261D9D /* TelegramUI */, D0FC408C1D5B8E7500261D9D /* TelegramUITests */, @@ -120,6 +735,12 @@ D0FC40811D5B8E7400261D9D /* TelegramUI */ = { isa = PBXGroup; children = ( + D0F69E911D6B8C8E0046BCD6 /* Utils */, + D0F69DBB1D6B88330046BCD6 /* Media */, + D0F69DBD1D6B897A0046BCD6 /* Components */, + D0F69DE61D6B8A4E0046BCD6 /* Controllers */, + D0F69CCE1D6B87950046BCD6 /* Files */, + D0F69E7E1D6B8C500046BCD6 /* Supporting Files */, D0FC40821D5B8E7400261D9D /* TelegramUI.h */, D0FC40831D5B8E7400261D9D /* Info.plist */, ); @@ -142,7 +763,11 @@ isa = PBXHeadersBuildPhase; buildActionMask = 2147483647; files = ( + D0F69E8E1D6B8C850046BCD6 /* RingBuffer.h in Headers */, D0FC40901D5B8E7500261D9D /* TelegramUI.h in 
Headers */, + D0F69E9A1D6B8D200046BCD6 /* UIImage+WebP.h in Headers */, + D0F69E8A1D6B8C850046BCD6 /* FFMpegSwResample.h in Headers */, + D0F69E881D6B8C850046BCD6 /* FastBlur.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -198,6 +823,7 @@ D0FC407E1D5B8E7400261D9D = { CreatedOnToolsVersion = 8.0; DevelopmentTeam = X834Q8SBVP; + LastSwiftMigration = 0800; ProvisioningStyle = Manual; }; D0FC40871D5B8E7500261D9D = { @@ -231,6 +857,7 @@ buildActionMask = 2147483647; files = ( D0AB0BBB1D6719B5002C78E7 /* Images.xcassets in Resources */, + D0F69DBA1D6B88190046BCD6 /* TelegramUI.xcconfig in Resources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -248,6 +875,124 @@ isa = PBXSourcesBuildPhase; buildActionMask = 2147483647; files = ( + D0F69E3C1D6B8B030046BCD6 /* ChatMessageTextBubbleContentNode.swift in Sources */, + D0F69E171D6B8ACF0046BCD6 /* ChatHistoryLocation.swift in Sources */, + D0F69E741D6B8C340046BCD6 /* ContactsControllerNode.swift in Sources */, + D0F69EA21D6B8E380046BCD6 /* PhotoResources.swift in Sources */, + D0F69DC71D6B89E70046BCD6 /* TransformImageNode.swift in Sources */, + D0F69E341D6B8B030046BCD6 /* ChatMessageForwardInfoNode.swift in Sources */, + D0F69E561D6B8BDA0046BCD6 /* GalleryControllerNode.swift in Sources */, + D0F69E4D1D6B8BB20046BCD6 /* ChatMediaActionSheetRollItem.swift in Sources */, + D0F69E661D6B8BF90046BCD6 /* ZoomableContentGalleryItemNode.swift in Sources */, + D0F69DF01D6B8A6C0046BCD6 /* AuthorizationCodeControllerNode.swift in Sources */, + D0F69EA11D6B8E380046BCD6 /* FileResources.swift in Sources */, + D0F69D271D6B87D30046BCD6 /* FFMpegAudioFrameDecoder.swift in Sources */, + D0F69D521D6B87D30046BCD6 /* MediaPlayer.swift in Sources */, + D0F69E031D6B8A880046BCD6 /* ChatListItem.swift in Sources */, + D0F69E081D6B8A9C0046BCD6 /* ChatListSearchContainerNode.swift in Sources */, + D0F69E4C1D6B8BB20046BCD6 /* ChatMediaActionSheetController.swift in Sources */, + D0F69DA41D6B87EC0046BCD6 /* 
FFMpegMediaVideoFrameDecoder.swift in Sources */, + D0F69E161D6B8ACF0046BCD6 /* ChatHistoryEntry.swift in Sources */, + D0F69DE01D6B8A420046BCD6 /* ListControllerButtonItem.swift in Sources */, + D0F69E0C1D6B8AB10046BCD6 /* HorizontalPeerItem.swift in Sources */, + D0F69E551D6B8BDA0046BCD6 /* GalleryController.swift in Sources */, + D0F69E571D6B8BDA0046BCD6 /* GalleryItem.swift in Sources */, + D0F69E951D6B8C9B0046BCD6 /* ImageRepresentationsUtils.swift in Sources */, + D0F69D231D6B87D30046BCD6 /* FFMpegMediaFrameSourceContext.swift in Sources */, + D0F69E431D6B8B7E0046BCD6 /* ResizeableTextInputView.swift in Sources */, + D0F69E8D1D6B8C850046BCD6 /* Localizable.swift in Sources */, + D0F69E651D6B8BF90046BCD6 /* ChatVideoGalleryItemScrubberView.swift in Sources */, + D0F69E421D6B8B7E0046BCD6 /* ChatInputView.swift in Sources */, + D0F69E1A1D6B8AE60046BCD6 /* ChatHoleItem.swift in Sources */, + D0F69D9C1D6B87EC0046BCD6 /* MediaPlaybackData.swift in Sources */, + D0F69D241D6B87D30046BCD6 /* MediaPlayerAudioRenderer.swift in Sources */, + D0F69D4B1D6B87D30046BCD6 /* TouchDownGestureRecognizer.swift in Sources */, + D0F69E3D1D6B8B030046BCD6 /* ChatMessageWebpageBubbleContentNode.swift in Sources */, + D0F69E8B1D6B8C850046BCD6 /* FFMpegSwResample.m in Sources */, + D0F69DD21D6B8A0D0046BCD6 /* SearchDisplayControllerContentNode.swift in Sources */, + D0F69DC51D6B89E10046BCD6 /* RadialProgressNode.swift in Sources */, + D0F69E491D6B8BAC0046BCD6 /* ActionSheetRollImageItem.swift in Sources */, + D0F69E761D6B8C340046BCD6 /* ContactsSearchContainerNode.swift in Sources */, + D0F69E011D6B8A880046BCD6 /* ChatListEmptyItem.swift in Sources */, + D0F69E591D6B8BDA0046BCD6 /* GalleryPagerNode.swift in Sources */, + D0F69E391D6B8B030046BCD6 /* ChatMessageMediaBubbleContentNode.swift in Sources */, + D0F69D351D6B87D30046BCD6 /* MediaFrameSource.swift in Sources */, + D0F69E371D6B8B030046BCD6 /* ChatMessageItem.swift in Sources */, + D0F69E641D6B8BF90046BCD6 /* 
ChatVideoGalleryItem.swift in Sources */, + D0F69E351D6B8B030046BCD6 /* ChatMessageInteractiveFileNode.swift in Sources */, + D0F69E151D6B8ACF0046BCD6 /* ChatControllerNode.swift in Sources */, + D0F69E001D6B8A880046BCD6 /* ChatListControllerNode.swift in Sources */, + D0F69EA31D6B8E380046BCD6 /* StickerResources.swift in Sources */, + D0F69E961D6B8C9B0046BCD6 /* ProgressiveImage.swift in Sources */, + D0F69E621D6B8BF90046BCD6 /* ChatHoleGalleryItem.swift in Sources */, + D0F69E331D6B8B030046BCD6 /* ChatMessageFileBubbleContentNode.swift in Sources */, + D0F69E461D6B8B950046BCD6 /* ChatHistoryNavigationButtonNode.swift in Sources */, + D0F69D671D6B87D30046BCD6 /* FFMpegPacket.swift in Sources */, + D0F69E321D6B8B030046BCD6 /* ChatMessageDateAndStatusNode.swift in Sources */, + D0F69E041D6B8A880046BCD6 /* ChatListSearchItem.swift in Sources */, + D0F69E611D6B8BF90046BCD6 /* ChatDocumentGalleryItem.swift in Sources */, + D0F69E0A1D6B8AA60046BCD6 /* ChatListSearchRecentPeersNode.swift in Sources */, + D0F69E3E1D6B8B030046BCD6 /* ChatUnreadItem.swift in Sources */, + D0F69D771D6B87DF0046BCD6 /* FFMpegMediaPassthroughVideoFrameDecoder.swift in Sources */, + D0F69DFE1D6B8A880046BCD6 /* ChatListAvatarNode.swift in Sources */, + D0F69E9B1D6B8D200046BCD6 /* UIImage+WebP.m in Sources */, + D0F69E581D6B8BDA0046BCD6 /* GalleryItemNode.swift in Sources */, + D0F69DAD1D6B87EC0046BCD6 /* Cache.swift in Sources */, + D0F69E971D6B8C9B0046BCD6 /* WebP.swift in Sources */, + D0F69E2F1D6B8B030046BCD6 /* ChatMessageBubbleContentCalclulateImageCorners.swift in Sources */, + D0F69E361D6B8B030046BCD6 /* ChatMessageInteractiveMediaNode.swift in Sources */, + D0F69E381D6B8B030046BCD6 /* ChatMessageItemView.swift in Sources */, + D0F69E901D6B8C850046BCD6 /* RingByteBuffer.swift in Sources */, + D0F69E731D6B8C340046BCD6 /* ContactsController.swift in Sources */, + D0F69D261D6B87D30046BCD6 /* MediaManager.swift in Sources */, + D0F69D2C1D6B87D30046BCD6 /* MediaPlayerNode.swift in Sources */, + 
D0F69E311D6B8B030046BCD6 /* ChatMessageBubbleItemNode.swift in Sources */, + D0F69E021D6B8A880046BCD6 /* ChatListHoleItem.swift in Sources */, + D0F69DF51D6B8A6C0046BCD6 /* AuthorizationPhoneControllerNode.swift in Sources */, + D0F69E751D6B8C340046BCD6 /* ContactsPeerItem.swift in Sources */, + D0F69DD61D6B8A2D0046BCD6 /* AlertController.swift in Sources */, + D0F69E7D1D6B8C470046BCD6 /* SettingsController.swift in Sources */, + D0F69E8C1D6B8C850046BCD6 /* FrameworkBundle.swift in Sources */, + D0F69D661D6B87D30046BCD6 /* FFMpegMediaFrameSourceContextHelpers.swift in Sources */, + D0F69DD11D6B8A0D0046BCD6 /* SearchDisplayController.swift in Sources */, + D0F69DF21D6B8A6C0046BCD6 /* AuthorizationPasswordController.swift in Sources */, + D0F69E8F1D6B8C850046BCD6 /* RingBuffer.m in Sources */, + D0F69DF31D6B8A6C0046BCD6 /* AuthorizationPasswordControllerNode.swift in Sources */, + D0F69E131D6B8ACF0046BCD6 /* ChatController.swift in Sources */, + D0F69DFF1D6B8A880046BCD6 /* ChatListController.swift in Sources */, + D0F69DF11D6B8A6C0046BCD6 /* AuthorizationController.swift in Sources */, + D0F69E891D6B8C850046BCD6 /* FastBlur.m in Sources */, + D0F69E7C1D6B8C470046BCD6 /* SettingsAccountInfoItem.swift in Sources */, + D0F69E6A1D6B8C160046BCD6 /* MapInputController.swift in Sources */, + D0F69DE51D6B8A420046BCD6 /* ListControllerSpacerItem.swift in Sources */, + D0F69DE11D6B8A420046BCD6 /* ListControllerDisclosureActionItem.swift in Sources */, + D0F69E301D6B8B030046BCD6 /* ChatMessageBubbleContentNode.swift in Sources */, + D0F69DC31D6B89DA0046BCD6 /* TextNode.swift in Sources */, + D0F69DC11D6B89D30046BCD6 /* ListSectionHeaderNode.swift in Sources */, + D0F69E771D6B8C340046BCD6 /* ContactsSectionHeaderAccessoryItem.swift in Sources */, + D0F69E631D6B8BF90046BCD6 /* ChatImageGalleryItem.swift in Sources */, + D0F69E3B1D6B8B030046BCD6 /* ChatMessageStickerItemNode.swift in Sources */, + D0F69DEF1D6B8A6C0046BCD6 /* AuthorizationCodeController.swift in Sources */, + 
D0F69DF41D6B8A6C0046BCD6 /* AuthorizationPhoneController.swift in Sources */, + D0F69D781D6B87DF0046BCD6 /* MediaTrackFrameBuffer.swift in Sources */, + D0F69DD01D6B8A0D0046BCD6 /* SearchBarPlaceholderNode.swift in Sources */, + D0F69E781D6B8C340046BCD6 /* ContactsVCardItem.swift in Sources */, + D0F69DA51D6B87EC0046BCD6 /* MediaTrackFrameDecoder.swift in Sources */, + D0F69E2D1D6B8B030046BCD6 /* ChatMessageActionItemNode.swift in Sources */, + D0F69DE21D6B8A420046BCD6 /* ListControllerGroupableItem.swift in Sources */, + D0F69D791D6B87DF0046BCD6 /* MediaTrackFrame.swift in Sources */, + D0F69DC91D6B89EB0046BCD6 /* ImageNode.swift in Sources */, + D0F69D311D6B87D30046BCD6 /* FFMpegMediaFrameSource.swift in Sources */, + D0F69DDF1D6B8A420046BCD6 /* ListController.swift in Sources */, + D0F69E3A1D6B8B030046BCD6 /* ChatMessageReplyInfoNode.swift in Sources */, + D0F69DE31D6B8A420046BCD6 /* ListControllerItem.swift in Sources */, + D0F69E6B1D6B8C160046BCD6 /* MapInputControllerNode.swift in Sources */, + D0F69DCF1D6B8A0D0046BCD6 /* SearchBarNode.swift in Sources */, + D0F69DE41D6B8A420046BCD6 /* ListControllerNode.swift in Sources */, + D0F69E2E1D6B8B030046BCD6 /* ChatMessageAvatarAccessoryItem.swift in Sources */, + D0F69D2E1D6B87D30046BCD6 /* PeerAvatar.swift in Sources */, + D0F69E141D6B8ACF0046BCD6 /* ChatControllerInteraction.swift in Sources */, + D0F69D6D1D6B87D30046BCD6 /* MediaTrackDecodableFrame.swift in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -272,6 +1017,7 @@ /* Begin XCBuildConfiguration section */ D0400EDB1D5B900A007931CE /* Hockeyapp */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; @@ -318,8 +1064,10 @@ }; D0400EDC1D5B900A007931CE /* Hockeyapp */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { 
APPLICATION_EXTENSION_API_ONLY = YES; + CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = ""; "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Distribution"; DEFINES_MODULE = YES; @@ -331,6 +1079,11 @@ INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; IPHONEOS_DEPLOYMENT_TARGET = 8.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/third-party/libwebp/lib", + "$(PROJECT_DIR)/third-party/FFmpeg-iOS/lib", + ); OTHER_LDFLAGS = "-ObjC"; PRODUCT_BUNDLE_IDENTIFIER = org.telegram.TelegramUI; PRODUCT_NAME = "$(TARGET_NAME)"; @@ -341,7 +1094,9 @@ }; D0400EDD1D5B900A007931CE /* Hockeyapp */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; DEVELOPMENT_TEAM = X834Q8SBVP; INFOPLIST_FILE = TelegramUITests/Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; @@ -353,6 +1108,7 @@ }; D0FC40911D5B8E7500261D9D /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; @@ -406,6 +1162,7 @@ }; D0FC40921D5B8E7500261D9D /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { ALWAYS_SEARCH_USER_PATHS = NO; CLANG_ANALYZER_NONNULL = YES; @@ -452,8 +1209,10 @@ }; D0FC40941D5B8E7500261D9D /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { APPLICATION_EXTENSION_API_ONLY = YES; + CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = ""; "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Developer"; DEFINES_MODULE = YES; @@ -465,18 +1224,26 @@ INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; 
IPHONEOS_DEPLOYMENT_TARGET = 8.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/third-party/libwebp/lib", + "$(PROJECT_DIR)/third-party/FFmpeg-iOS/lib", + ); OTHER_LDFLAGS = "-ObjC"; PRODUCT_BUNDLE_IDENTIFIER = org.telegram.TelegramUI; PRODUCT_NAME = "$(TARGET_NAME)"; SKIP_INSTALL = YES; + SWIFT_OPTIMIZATION_LEVEL = "-Onone"; SWIFT_VERSION = 3.0; }; name = Debug; }; D0FC40951D5B8E7500261D9D /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { APPLICATION_EXTENSION_API_ONLY = YES; + CLANG_ENABLE_MODULES = YES; CODE_SIGN_IDENTITY = ""; "CODE_SIGN_IDENTITY[sdk=iphoneos*]" = "iPhone Distribution"; DEFINES_MODULE = YES; @@ -488,6 +1255,11 @@ INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks"; IPHONEOS_DEPLOYMENT_TARGET = 8.0; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; + LIBRARY_SEARCH_PATHS = ( + "$(inherited)", + "$(PROJECT_DIR)/third-party/libwebp/lib", + "$(PROJECT_DIR)/third-party/FFmpeg-iOS/lib", + ); OTHER_LDFLAGS = "-ObjC"; PRODUCT_BUNDLE_IDENTIFIER = org.telegram.TelegramUI; PRODUCT_NAME = "$(TARGET_NAME)"; @@ -498,7 +1270,9 @@ }; D0FC40971D5B8E7500261D9D /* Debug */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; DEVELOPMENT_TEAM = X834Q8SBVP; INFOPLIST_FILE = TelegramUITests/Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; @@ -510,7 +1284,9 @@ }; D0FC40981D5B8E7500261D9D /* Release */ = { isa = XCBuildConfiguration; + baseConfigurationReference = D0F69DB91D6B88190046BCD6 /* TelegramUI.xcconfig */; buildSettings = { + ALWAYS_EMBED_SWIFT_STANDARD_LIBRARIES = YES; DEVELOPMENT_TEAM = X834Q8SBVP; INFOPLIST_FILE = 
TelegramUITests/Info.plist; LD_RUNPATH_SEARCH_PATHS = "$(inherited) @executable_path/Frameworks @loader_path/Frameworks"; diff --git a/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/TelegramUI.xcscheme b/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/TelegramUI.xcscheme index 13207a4343..d3e17a9527 100644 --- a/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/TelegramUI.xcscheme +++ b/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/TelegramUI.xcscheme @@ -5,6 +5,22 @@ + + + + + + + + + + @@ -35,6 +60,15 @@ savedToolIdentifier = "" useCustomWorkingDirectory = "NO" debugDocumentVersioning = "YES"> + + + + diff --git a/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist b/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist index dd1147e83f..bffa9bd7a0 100644 --- a/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist +++ b/TelegramUI.xcodeproj/xcuserdata/peter.xcuserdatad/xcschemes/xcschememanagement.plist @@ -7,7 +7,7 @@ TelegramUI.xcscheme orderHint - 1 + 19 SuppressBuildableAutocreation diff --git a/TelegramUI/ActionSheetRollImageItem.swift b/TelegramUI/ActionSheetRollImageItem.swift new file mode 100644 index 0000000000..5c2da247a4 --- /dev/null +++ b/TelegramUI/ActionSheetRollImageItem.swift @@ -0,0 +1,69 @@ +import Foundation +import Display +import AsyncDisplayKit +import Photos + +private let testBackground = generateStretchableFilledCircleImage(radius: 8.0, color: UIColor.lightGray) + +final class ActionSheetRollImageItem: ListViewItem { + let asset: PHAsset + + init(asset: PHAsset) { + self.asset = asset + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ActionSheetRollImageItemNode() + node.contentSize = 
CGSize(width: 84.0, height: 84.0) + node.insets = UIEdgeInsets(top: 4.0, left: 0.0, bottom: 4.0, right: 0.0) + node.updateAsset(asset: self.asset) + completion(node, { + }) + } + } + + func updateNode(async: (() -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: (ListViewItemNodeLayout, () -> Void) -> Void) { + completion(ListViewItemNodeLayout(contentSize: node.contentSize, insets: node.insets), { + }) + } +} + +private final class ActionSheetRollImageItemNode: ListViewItemNode { + private let imageNode: ASImageNode + + init() { + self.imageNode = ASImageNode() + + self.imageNode.frame = CGRect(origin: CGPoint(), size: CGSize(width: 84.0, height: 84.0)) + self.imageNode.displaysAsynchronously = true + self.imageNode.clipsToBounds = true + self.imageNode.cornerRadius = 8.0 + //self.imageNode.contentMode = .scaleToFill + //self.imageNode.contentsScale = UIScreenScale + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.imageNode) + } + + func updateAsset(asset: PHAsset) { + let retinaSquare = CGSize(width: 84.0 * UIScreenScale, height: 84.0 * UIScreenScale) + + let cropToSquare = PHImageRequestOptions() + cropToSquare.resizeMode = .exact; + + let cropSideLength = min(asset.pixelWidth, asset.pixelHeight) + let square = CGRect(x: 0.0, y: 0.0, width: CGFloat(cropSideLength), height: CGFloat(cropSideLength)) + let cropRect = square.applying(CGAffineTransform(scaleX: 1.0 / CGFloat(asset.pixelWidth), y: 1.0 / CGFloat(asset.pixelHeight))) + + cropToSquare.normalizedCropRect = cropRect + + PHImageManager.default().requestImage(for: asset, targetSize: retinaSquare, contentMode: .aspectFit, options: cropToSquare, resultHandler: { [weak self] image, result in + if let strongSelf = self, let image = image, let cgImage = image.cgImage { + let orientedImage = UIImage(cgImage: cgImage, scale: image.scale, orientation: .right) + 
strongSelf.imageNode.image = orientedImage + } + }) + } +} diff --git a/TelegramUI/AlertController.swift b/TelegramUI/AlertController.swift new file mode 100644 index 0000000000..5656c9b853 --- /dev/null +++ b/TelegramUI/AlertController.swift @@ -0,0 +1,7 @@ +import Foundation +import Display +import AsyncDisplayKit + +class AlertController { + +} diff --git a/TelegramUI/AuthorizationCodeController.swift b/TelegramUI/AuthorizationCodeController.swift new file mode 100644 index 0000000000..23f996ef68 --- /dev/null +++ b/TelegramUI/AuthorizationCodeController.swift @@ -0,0 +1,84 @@ +import Foundation +import Display +import SwiftSignalKit +import MtProtoKit +import TelegramCore + +enum AuthorizationCodeResult { + case Authorization(Api.auth.Authorization) + case Password +} + +class AuthorizationCodeController: ViewController { + let account: UnauthorizedAccount + let phone: String + let sentCode: Api.auth.SentCode + + var node: AuthorizationCodeControllerNode { + return self.displayNode as! 
AuthorizationCodeControllerNode + } + + let signInDisposable = MetaDisposable() + let resultPipe = ValuePipe() + var result: Signal { + return resultPipe.signal() + } + + init(account: UnauthorizedAccount, phone: String, sentCode: Api.auth.SentCode) { + self.account = account + self.phone = phone + self.sentCode = sentCode + + super.init() + + self.title = "Code" + self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Next", style: .done, target: self, action: #selector(AuthorizationCodeController.nextPressed)) + } + + required init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.signInDisposable.dispose() + } + + override func loadDisplayNode() { + self.displayNode = AuthorizationCodeControllerNode() + self.displayNodeDidLoad() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.node.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } + + @objc func nextPressed() { + var phoneCodeHash: String? + switch self.sentCode { + case let .sentCode(_, _, apiPhoneCodeHash, _, _): + phoneCodeHash = apiPhoneCodeHash + default: + break + } + if let phoneCodeHash = phoneCodeHash { + let signal = self.account.network.request(Api.functions.auth.signIn(phoneNumber: self.phone, phoneCodeHash: phoneCodeHash, phoneCode: node.codeNode.attributedText?.string ?? 
"")) |> map { authorization in + return AuthorizationCodeResult.Authorization(authorization) + } |> `catch` { error -> Signal in + switch (error.errorCode, error.errorDescription) { + case (401, "SESSION_PASSWORD_NEEDED"): + return .single(.Password) + case _: + return .fail(error) + } + } + + self.signInDisposable.set(signal.start(next: { [weak self] result in + if let strongSelf = self { + strongSelf.resultPipe.putNext(result) + } + })) + } + } +} diff --git a/TelegramUI/AuthorizationCodeControllerNode.swift b/TelegramUI/AuthorizationCodeControllerNode.swift new file mode 100644 index 0000000000..46b5343006 --- /dev/null +++ b/TelegramUI/AuthorizationCodeControllerNode.swift @@ -0,0 +1,21 @@ +import Foundation +import Display +import AsyncDisplayKit + +class AuthorizationCodeControllerNode: ASDisplayNode { + let codeNode: ASEditableTextNode + + override init() { + self.codeNode = ASEditableTextNode() + + super.init() + + self.codeNode.typingAttributes = [NSFontAttributeName: Font.regular(17.0)] + self.codeNode.backgroundColor = UIColor.lightGray + self.addSubnode(self.codeNode) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.codeNode.frame = CGRect(origin: CGPoint(x: 4.0, y: navigationBarHeight + 4.0), size: CGSize(width: layout.size.width - 8.0, height: 32.0)) + } +} diff --git a/TelegramUI/AuthorizationController.swift b/TelegramUI/AuthorizationController.swift new file mode 100644 index 0000000000..d324c10f59 --- /dev/null +++ b/TelegramUI/AuthorizationController.swift @@ -0,0 +1,84 @@ +import Foundation +import Display +import SwiftSignalKit +import TelegramCore + +public class AuthorizationController: NavigationController { + private var account: UnauthorizedAccount! 
+ + private let authorizedAccountValue = Promise() + public var authorizedAccount: Signal { + return authorizedAccountValue.get() + } + + public init(account: UnauthorizedAccount) { + self.account = account + let phoneController = AuthorizationPhoneController(account: account) + + super.init() + + self.pushViewController(phoneController, animated: false) + + let authorizationSequence = phoneController.result |> mapToSignal { (account, sentCode, phone) -> Signal in + return deferred { [weak self] in + if let strongSelf = self { + strongSelf.account = account + let codeController = AuthorizationCodeController(account: account, phone: phone, sentCode: sentCode) + strongSelf.pushViewController(codeController, animated: true) + + return codeController.result |> mapToSignal { result -> Signal in + switch result { + case let .Authorization(authorization): + return single(authorization, NoError.self) + case .Password: + return deferred { [weak self] () -> Signal in + if let strongSelf = self { + let passwordController = AuthorizationPasswordController(account: account) + strongSelf.pushViewController(passwordController, animated: true) + + return passwordController.result + } else { + return complete(Api.auth.Authorization.self, NoError.self) + } + } |> runOn(Queue.mainQueue()) + } + } + } else { + return complete(Api.auth.Authorization.self, NoError.self) + } + } |> runOn(Queue.mainQueue()) + } + + let accountSignal = authorizationSequence |> mapToSignal { [weak self] authorization -> Signal in + if let strongSelf = self { + switch authorization { + case let .authorization(user): + let user = TelegramUser(user: user) + + return account.postbox.modify { modifier -> AccountState in + let state = AuthorizedAccountState(masterDatacenterId: strongSelf.account.masterDatacenterId, peerId: user.id, state: nil) + modifier.setState(state) + return state + } |> map { state -> Account in + return Account(id: account.id, postbox: account.postbox, network: account.network, peerId: 
user.id) + } + } + } else { + return .complete() + } + } + + self.authorizedAccountValue.set(accountSignal) + } + + override public init(nibName nibNameOrNil: String?, bundle nibBundleOrNil: Bundle?) { + super.init(nibName: nibNameOrNil, bundle: nibBundleOrNil) + } + + required public init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + } +} diff --git a/TelegramUI/AuthorizationPasswordController.swift b/TelegramUI/AuthorizationPasswordController.swift new file mode 100644 index 0000000000..39971e0e0a --- /dev/null +++ b/TelegramUI/AuthorizationPasswordController.swift @@ -0,0 +1,57 @@ +import Foundation +import Display +import SwiftSignalKit +import MtProtoKit +import TelegramCore + +class AuthorizationPasswordController: ViewController { + private var account: UnauthorizedAccount + + private var node: AuthorizationPasswordControllerNode { + return self.displayNode as! AuthorizationPasswordControllerNode + } + + private let signInDisposable = MetaDisposable() + private let resultPipe = ValuePipe() + var result: Signal { + return resultPipe.signal() + } + + init(account: UnauthorizedAccount) { + self.account = account + + super.init() + + self.title = "Password" + self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Next", style: .done, target: self, action: #selector(AuthorizationPasswordController.nextPressed)) + } + + required init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + signInDisposable.dispose() + } + + override func loadDisplayNode() { + self.displayNode = AuthorizationPasswordControllerNode() + self.displayNodeDidLoad() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.node.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } + + @objc func 
nextPressed() { + let password = self.node.passwordNode.attributedText?.string ?? "" + + self.signInDisposable.set(verifyPassword(self.account, password: password).start(next: { [weak self] result in + if let strongSelf = self { + strongSelf.resultPipe.putNext(result) + } + })) + } +} diff --git a/TelegramUI/AuthorizationPasswordControllerNode.swift b/TelegramUI/AuthorizationPasswordControllerNode.swift new file mode 100644 index 0000000000..6b4ddc10d5 --- /dev/null +++ b/TelegramUI/AuthorizationPasswordControllerNode.swift @@ -0,0 +1,21 @@ +import Foundation +import Display +import AsyncDisplayKit + +class AuthorizationPasswordControllerNode: ASDisplayNode { + let passwordNode: ASEditableTextNode + + override init() { + self.passwordNode = ASEditableTextNode() + + super.init() + + self.passwordNode.typingAttributes = [NSFontAttributeName: Font.regular(17.0)] + self.passwordNode.backgroundColor = UIColor.lightGray + self.addSubnode(self.passwordNode) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.passwordNode.frame = CGRect(origin: CGPoint(x: 4.0, y: navigationBarHeight + 4.0), size: CGSize(width: layout.size.width - 8.0, height: 32.0)) + } +} diff --git a/TelegramUI/AuthorizationPhoneController.swift b/TelegramUI/AuthorizationPhoneController.swift new file mode 100644 index 0000000000..9f6ed69ab8 --- /dev/null +++ b/TelegramUI/AuthorizationPhoneController.swift @@ -0,0 +1,75 @@ +import Foundation +import Display +import SwiftSignalKit +import MtProtoKit +import TelegramCore + +class AuthorizationPhoneController: ViewController { + private var account: UnauthorizedAccount + + private var node: AuthorizationPhoneControllerNode { + return self.displayNode as! 
AuthorizationPhoneControllerNode + } + + private let codeDisposable = MetaDisposable() + private let resultPipe = ValuePipe<(UnauthorizedAccount, Api.auth.SentCode, String)>() + var result: Signal<(UnauthorizedAccount, Api.auth.SentCode, String), NoError> { + return resultPipe.signal() + } + + init(account: UnauthorizedAccount) { + self.account = account + + super.init() + + self.title = "Telegram" + self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Next", style: .done, target: self, action: #selector(AuthorizationPhoneController.nextPressed)) + } + + required init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + codeDisposable.dispose() + } + + override func loadDisplayNode() { + self.displayNode = AuthorizationPhoneControllerNode() + self.displayNodeDidLoad() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.node.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } + + @objc func nextPressed() { + let phone = self.node.phoneNode.attributedText?.string ?? "" + let account = self.account + let sendCode = Api.functions.auth.sendCode(flags: 0, phoneNumber: phone, currentNumber: nil, apiId: 10840, apiHash: "33c45224029d59cb3ad0c16134215aeb", langCode: "en") + + let signal = account.network.request(sendCode) + |> map { result in + return (result, account) + } |> `catch` { error -> Signal<(Api.auth.SentCode, UnauthorizedAccount), MTRpcError> in + switch error.errorDescription { + case Regex("(PHONE_|USER_|NETWORK_)MIGRATE_(\\d+)"): + let range = error.errorDescription.range(of: "MIGRATE_")! + let updatedMasterDatacenterId = Int32(error.errorDescription.substring(from: range.upperBound))! 
+ let updatedAccount = account.changedMasterDatacenterId(updatedMasterDatacenterId) + return updatedAccount.network.request(sendCode) |> map { sentCode in return (sentCode, updatedAccount) } + case _: + return .fail(error) + } + } + + codeDisposable.set(signal.start(next: { [weak self] (result, account) in + if let strongSelf = self { + strongSelf.account = account + strongSelf.resultPipe.putNext((account, result, phone)) + } + })) + } +} diff --git a/TelegramUI/AuthorizationPhoneControllerNode.swift b/TelegramUI/AuthorizationPhoneControllerNode.swift new file mode 100644 index 0000000000..3945d8111d --- /dev/null +++ b/TelegramUI/AuthorizationPhoneControllerNode.swift @@ -0,0 +1,21 @@ +import Foundation +import Display +import AsyncDisplayKit + +class AuthorizationPhoneControllerNode: ASDisplayNode { + let phoneNode: ASEditableTextNode + + override init() { + self.phoneNode = ASEditableTextNode() + + super.init() + + self.phoneNode.typingAttributes = [NSFontAttributeName: Font.regular(17.0)] + self.phoneNode.backgroundColor = UIColor.lightGray + self.addSubnode(self.phoneNode) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.phoneNode.frame = CGRect(origin: CGPoint(x: 4.0, y: navigationBarHeight + 4.0), size: CGSize(width: layout.size.width - 8.0, height: 32.0)) + } +} diff --git a/TelegramUI/Cache.swift b/TelegramUI/Cache.swift new file mode 100644 index 0000000000..75ec105480 --- /dev/null +++ b/TelegramUI/Cache.swift @@ -0,0 +1,40 @@ +import Foundation +import SwiftSignalKit +import Display +import TelegramCore + +let threadPool = ThreadPool(threadCount: 4, threadPriority: 0.2) + +func cachedCloudFileLocation(_ location: TelegramCloudMediaLocation) -> Signal { + return Signal { subscriber in + assertNotOnMainThread() + switch location.apiInputLocation { + case let .inputFileLocation(volumeId, localId, _): + let path = NSTemporaryDirectory() + 
"/\(location.datacenterId)_\(volumeId)_\(localId)" + do { + let data = try Data(contentsOf: URL(fileURLWithPath: path), options: .mappedIfSafe) + subscriber.putNext(data) + subscriber.putCompletion() + } catch { + subscriber.putError(NoError()) + } + + case _: + subscriber.putError(NoError()) + } + return ActionDisposable { + + } + } +} + +func cacheCloudFileLocation(_ location: TelegramCloudMediaLocation, data: Data) { + assertNotOnMainThread() + switch location.apiInputLocation { + case let .inputFileLocation(volumeId, localId, _): + let path = NSTemporaryDirectory() + "/\(location.datacenterId)_\(volumeId)_\(localId)" + let _ = try? data.write(to: URL(fileURLWithPath: path), options: [.atomicWrite]) + case _: + break + } +} diff --git a/TelegramUI/ChatController.swift b/TelegramUI/ChatController.swift new file mode 100644 index 0000000000..2ec122fa98 --- /dev/null +++ b/TelegramUI/ChatController.swift @@ -0,0 +1,846 @@ +import Foundation +import UIKit +import Postbox +import SwiftSignalKit +import Display +import AsyncDisplayKit +import TelegramCore + +private enum ChatControllerScrollPosition { + case Unread(index: MessageIndex) + case Index(index: MessageIndex, position: ListViewScrollPosition, directionHint: ListViewScrollToItemDirectionHint, animated: Bool) +} + +private enum ChatHistoryViewUpdateType { + case Initial(fadeIn: Bool) + case Generic(type: ViewUpdateType) +} + +private enum ChatHistoryViewUpdate { + case Loading + case HistoryView(view: MessageHistoryView, type: ChatHistoryViewUpdateType, scrollPosition: ChatControllerScrollPosition?) 
+} + +private struct ChatHistoryView { + let originalView: MessageHistoryView + let filteredEntries: [ChatHistoryEntry] +} + +private enum ChatHistoryViewTransitionReason { + case Initial(fadeIn: Bool) + case InteractiveChanges + case HoleChanges(filledHoleDirections: [MessageIndex: HoleFillDirection], removeHoleDirections: [MessageIndex: HoleFillDirection]) + case Reload +} + +private struct ChatHistoryViewTransition { + let historyView: ChatHistoryView + let deleteItems: [ListViewDeleteItem] + let insertItems: [ListViewInsertItem] + let updateItems: [ListViewUpdateItem] + let options: ListViewDeleteAndInsertOptions + let scrollToItem: ListViewScrollToItem? + let stationaryItemRange: (Int, Int)? +} + +private func messageHistoryViewForLocation(_ location: ChatHistoryLocation, account: Account, peerId: PeerId, fixedCombinedReadState: CombinedPeerReadState?, tagMask: MessageTags?) -> Signal { + switch location { + case let .Initial(count): + var preloaded = false + var fadeIn = false + return account.viewTracker.aroundUnreadMessageHistoryViewForPeerId(peerId, count: count, tagMask: tagMask) |> map { view, updateType -> ChatHistoryViewUpdate in + if preloaded { + return .HistoryView(view: view, type: .Generic(type: updateType), scrollPosition: nil) + } else { + if let maxReadIndex = view.maxReadIndex { + var targetIndex = 0 + for i in 0 ..< view.entries.count { + if view.entries[i].index >= maxReadIndex { + targetIndex = i + break + } + } + + let maxIndex = min(view.entries.count, targetIndex + count / 2) + if maxIndex >= targetIndex { + for i in targetIndex ..< maxIndex { + if case .HoleEntry = view.entries[i] { + fadeIn = true + return .Loading + } + } + } + + preloaded = true + return .HistoryView(view: view, type: .Initial(fadeIn: fadeIn), scrollPosition: .Unread(index: maxReadIndex)) + } else { + preloaded = true + return .HistoryView(view: view, type: .Initial(fadeIn: fadeIn), scrollPosition: nil) + } + } + } + case let .InitialSearch(messageId, count): + var 
preloaded = false + var fadeIn = false + return account.viewTracker.aroundIdMessageHistoryViewForPeerId(peerId, count: count, messageId: messageId, tagMask: tagMask) |> map { view, updateType -> ChatHistoryViewUpdate in + if preloaded { + return .HistoryView(view: view, type: .Generic(type: updateType), scrollPosition: nil) + } else { + let anchorIndex = view.anchorIndex + + var targetIndex = 0 + for i in 0 ..< view.entries.count { + if view.entries[i].index >= anchorIndex { + targetIndex = i + break + } + } + + let maxIndex = min(view.entries.count, targetIndex + count / 2) + if maxIndex >= targetIndex { + for i in targetIndex ..< maxIndex { + if case .HoleEntry = view.entries[i] { + fadeIn = true + return .Loading + } + } + } + + preloaded = true + //case Index(index: MessageIndex, position: ListViewScrollPosition, directionHint: ListViewScrollToItemDirectionHint, animated: Bool) + return .HistoryView(view: view, type: .Initial(fadeIn: fadeIn), scrollPosition: .Index(index: anchorIndex, position: .Center(.Bottom), directionHint: .Down, animated: false)) + } + } + case let .Navigation(index, anchorIndex): + trace("messageHistoryViewForLocation navigation \(index.id.id)") + var first = true + return account.viewTracker.aroundMessageHistoryViewForPeerId(peerId, index: index, count: 140, anchorIndex: anchorIndex, fixedCombinedReadState: fixedCombinedReadState, tagMask: tagMask) |> map { view, updateType -> ChatHistoryViewUpdate in + let genericType: ViewUpdateType + if first { + first = false + genericType = ViewUpdateType.UpdateVisible + } else { + genericType = updateType + } + return .HistoryView(view: view, type: .Generic(type: genericType), scrollPosition: nil) + } + case let .Scroll(index, anchorIndex, sourceIndex, scrollPosition, animated): + let directionHint: ListViewScrollToItemDirectionHint = sourceIndex > index ? 
.Down : .Up + let chatScrollPosition = ChatControllerScrollPosition.Index(index: index, position: scrollPosition, directionHint: directionHint, animated: animated) + var first = true + return account.viewTracker.aroundMessageHistoryViewForPeerId(peerId, index: index, count: 140, anchorIndex: anchorIndex, fixedCombinedReadState: fixedCombinedReadState, tagMask: tagMask) |> map { view, updateType -> ChatHistoryViewUpdate in + let genericType: ViewUpdateType + let scrollPosition: ChatControllerScrollPosition? = first ? chatScrollPosition : nil + if first { + first = false + genericType = ViewUpdateType.UpdateVisible + } else { + genericType = updateType + } + return .HistoryView(view: view, type: .Generic(type: genericType), scrollPosition: scrollPosition) + } + } +} + +private func historyEntriesForView(_ view: MessageHistoryView) -> [ChatHistoryEntry] { + var entries: [ChatHistoryEntry] = [] + + for entry in view.entries { + switch entry { + case let .HoleEntry(hole, _): + entries.append(.HoleEntry(hole)) + case let .MessageEntry(message, _): + entries.append(.MessageEntry(message)) + } + } + + if let maxReadIndex = view.maxReadIndex { + var inserted = false + var i = 0 + let unreadEntry: ChatHistoryEntry = .UnreadEntry(maxReadIndex) + for entry in entries { + if entry > unreadEntry { + entries.insert(unreadEntry, at: i) + inserted = true + + break + } + i += 1 + } + if !inserted { + //entries.append(.UnreadEntry(maxReadIndex)) + } + } + + return entries +} + +private func preparedHistoryViewTransition(from fromView: ChatHistoryView?, to toView: ChatHistoryView, reason: ChatHistoryViewTransitionReason, account: Account, peerId: PeerId, controllerInteraction: ChatControllerInteraction, scrollPosition: ChatControllerScrollPosition?) -> Signal { + return Signal { subscriber in + let updateIndices: [(Int, ChatHistoryEntry)] = [] + //let (deleteIndices, indicesAndItems, updateIndices) = mergeListsStableWithUpdates(leftList: fromView?.filteredEntries ?? 
[], rightList: toView.filteredEntries) + let (deleteIndices, indicesAndItems) = mergeListsStable(leftList: fromView?.filteredEntries ?? [], rightList: toView.filteredEntries) + + var adjustedDeleteIndices: [ListViewDeleteItem] = [] + let previousCount: Int + if let fromView = fromView { + previousCount = fromView.filteredEntries.count + } else { + previousCount = 0; + } + for index in deleteIndices { + adjustedDeleteIndices.append(ListViewDeleteItem(index: previousCount - 1 - index, directionHint: nil)) + } + + var adjustedIndicesAndItems: [ListViewInsertItem] = [] + var adjustedUpdateItems: [ListViewUpdateItem] = [] + let updatedCount = toView.filteredEntries.count + + var options: ListViewDeleteAndInsertOptions = [] + var maxAnimatedInsertionIndex = -1 + var stationaryItemRange: (Int, Int)? + var scrollToItem: ListViewScrollToItem? + + switch reason { + case let .Initial(fadeIn): + if fadeIn { + let _ = options.insert(.AnimateAlpha) + } else { + let _ = options.insert(.LowLatency) + let _ = options.insert(.Synchronous) + } + case .InteractiveChanges: + let _ = options.insert(.AnimateAlpha) + let _ = options.insert(.AnimateInsertion) + + for (index, _, _) in indicesAndItems.sorted(by: { $0.0 > $1.0 }) { + let adjustedIndex = updatedCount - 1 - index + if adjustedIndex == maxAnimatedInsertionIndex + 1 { + maxAnimatedInsertionIndex += 1 + } + } + case .Reload: + break + case let .HoleChanges(filledHoleDirections, removeHoleDirections): + if let (_, removeDirection) = removeHoleDirections.first { + switch removeDirection { + case .LowerToUpper: + var holeIndex: MessageIndex? + for (index, _) in filledHoleDirections { + if holeIndex == nil || index < holeIndex! 
{ + holeIndex = index + } + } + + if let holeIndex = holeIndex { + for i in 0 ..< toView.filteredEntries.count { + if toView.filteredEntries[i].index >= holeIndex { + let index = toView.filteredEntries.count - 1 - (i - 1) + stationaryItemRange = (index, Int.max) + break + } + } + } + case .UpperToLower: + break + case .AroundIndex: + break + } + } + } + + for (index, entry, previousIndex) in indicesAndItems { + let adjustedIndex = updatedCount - 1 - index + + let adjustedPrevousIndex: Int? + if let previousIndex = previousIndex { + adjustedPrevousIndex = previousCount - 1 - previousIndex + } else { + adjustedPrevousIndex = nil + } + + var directionHint: ListViewItemOperationDirectionHint? + if maxAnimatedInsertionIndex >= 0 && adjustedIndex <= maxAnimatedInsertionIndex { + directionHint = .Down + } + + switch entry { + case let .MessageEntry(message): + adjustedIndicesAndItems.append(ListViewInsertItem(index: adjustedIndex, previousIndex: adjustedPrevousIndex, item: ChatMessageItem(account: account, peerId: peerId, controllerInteraction: controllerInteraction, message: message), directionHint: directionHint)) + case .HoleEntry: + adjustedIndicesAndItems.append(ListViewInsertItem(index: adjustedIndex, previousIndex: adjustedPrevousIndex, item: ChatHoleItem(), directionHint: directionHint)) + case .UnreadEntry: + adjustedIndicesAndItems.append(ListViewInsertItem(index: adjustedIndex, previousIndex: adjustedPrevousIndex, item: ChatUnreadItem(), directionHint: directionHint)) + } + } + + for (index, entry) in updateIndices { + let adjustedIndex = updatedCount - 1 - index + + let directionHint: ListViewItemOperationDirectionHint? 
= nil + + switch entry { + case let .MessageEntry(message): + adjustedUpdateItems.append(ListViewUpdateItem(index: adjustedIndex, item: ChatMessageItem(account: account, peerId: peerId, controllerInteraction: controllerInteraction, message: message), directionHint: directionHint)) + case .HoleEntry: + adjustedUpdateItems.append(ListViewUpdateItem(index: adjustedIndex, item: ChatHoleItem(), directionHint: directionHint)) + case .UnreadEntry: + adjustedUpdateItems.append(ListViewUpdateItem(index: adjustedIndex, item: ChatUnreadItem(), directionHint: directionHint)) + } + } + + if let scrollPosition = scrollPosition { + switch scrollPosition { + case let .Unread(unreadIndex): + var index = toView.filteredEntries.count - 1 + for entry in toView.filteredEntries { + if case .UnreadEntry = entry { + scrollToItem = ListViewScrollToItem(index: index, position: .Bottom, animated: false, curve: .Default, directionHint: .Down) + break + } + index -= 1 + } + + if scrollToItem == nil { + var index = toView.filteredEntries.count - 1 + for entry in toView.filteredEntries { + if entry.index >= unreadIndex { + scrollToItem = ListViewScrollToItem(index: index, position: .Bottom, animated: false, curve: .Default, directionHint: .Down) + break + } + index -= 1 + } + } + + if scrollToItem == nil { + var index = 0 + for entry in toView.filteredEntries.reversed() { + if entry.index < unreadIndex { + scrollToItem = ListViewScrollToItem(index: index, position: .Bottom, animated: false, curve: .Default, directionHint: .Down) + break + } + index += 1 + } + } + case let .Index(scrollIndex, position, directionHint, animated): + var index = toView.filteredEntries.count - 1 + for entry in toView.filteredEntries { + if entry.index >= scrollIndex { + scrollToItem = ListViewScrollToItem(index: index, position: position, animated: animated, curve: .Default, directionHint: directionHint) + break + } + index -= 1 + } + + if scrollToItem == nil { + var index = 0 + for entry in 
toView.filteredEntries.reversed() { + if entry.index < scrollIndex { + scrollToItem = ListViewScrollToItem(index: index, position: position, animated: animated, curve: .Default, directionHint: directionHint) + break + } + index += 1 + } + } + } + } + + subscriber.putNext(ChatHistoryViewTransition(historyView: toView, deleteItems: adjustedDeleteIndices, insertItems: adjustedIndicesAndItems, updateItems: adjustedUpdateItems, options: options, scrollToItem: scrollToItem, stationaryItemRange: stationaryItemRange)) + subscriber.putCompletion() + + return EmptyDisposable + } +} + +private func maxIncomingMessageIdForEntries(_ entries: [ChatHistoryEntry], indexRange: (Int, Int)) -> MessageId? { + for i in (indexRange.0 ... indexRange.1).reversed() { + if case let .MessageEntry(message) = entries[i], message.flags.contains(.Incoming) { + return message.id + } + } + return nil +} + +private var useDarkMode = false + +public class ChatController: ViewController { + private var containerLayout = ContainerViewLayout() + + private let account: Account + private let peerId: PeerId + private let messageId: MessageId? + + private var historyView: ChatHistoryView? + + private let peerDisposable = MetaDisposable() + private let historyDisposable = MetaDisposable() + private let readHistoryDisposable = MetaDisposable() + + private let messageViewQueue = Queue() + + private let messageIndexDisposable = MetaDisposable() + + private var enqueuedHistoryViewTransition: (ChatHistoryViewTransition, () -> Void)? + private var layoutActionOnViewTransition: (@escaping () -> Void)? 
+ + private let _ready = Promise() + override public var ready: Promise { + return self._ready + } + private var didSetReady = false + + private let maxVisibleIncomingMessageId = Promise() + private let canReadHistory = Promise() + + private let _chatHistoryLocation = Promise() + private var chatHistoryLocation: Signal { + return self._chatHistoryLocation.get() + } + + private let galleryHiddenMesageAndMediaDisposable = MetaDisposable() + + private var controllerInteraction: ChatControllerInteraction? + + public init(account: Account, peerId: PeerId, messageId: MessageId? = nil) { + self.account = account + self.peerId = peerId + self.messageId = messageId + + super.init() + + self.setupThemeWithDarkMode(useDarkMode) + + self.scrollToTop = { [weak self] in + if let strongSelf = self { + strongSelf._chatHistoryLocation.set(.single(ChatHistoryLocation.Scroll(index: MessageIndex.lowerBound(peerId: strongSelf.peerId), anchorIndex: MessageIndex.lowerBound(peerId: strongSelf.peerId), sourceIndex: MessageIndex.upperBound(peerId: strongSelf.peerId), scrollPosition: .Bottom, animated: true))) + } + } + + let controllerInteraction = ChatControllerInteraction(openMessage: { [weak self] id in + if let strongSelf = self, let historyView = strongSelf.historyView { + var galleryMedia: Media? + for case let .MessageEntry(message) in historyView.filteredEntries where message.id == id { + for media in message.media { + if let file = media as? TelegramMediaFile { + galleryMedia = file + } else if let image = media as? TelegramMediaImage { + galleryMedia = image + } else if let webpage = media as? TelegramMediaWebpage, case let .Loaded(content) = webpage.content { + if let file = content.file { + galleryMedia = file + } else if let image = content.image { + galleryMedia = image + } + } + } + break + } + + if let galleryMedia = galleryMedia { + if let file = galleryMedia as? 
TelegramMediaFile, file.mimeType == "audio/mpeg" { + debugPlayMedia(account: strongSelf.account, file: file) + } else { + let gallery = GalleryController(account: strongSelf.account, messageId: id) + + strongSelf.galleryHiddenMesageAndMediaDisposable.set(gallery.hiddenMedia.start(next: { [weak strongSelf] messageIdAndMedia in + if let strongSelf = strongSelf { + if let messageIdAndMedia = messageIdAndMedia { + strongSelf.controllerInteraction?.hiddenMedia = [messageIdAndMedia.0: [messageIdAndMedia.1]] + } else { + strongSelf.controllerInteraction?.hiddenMedia = [:] + } + strongSelf.chatDisplayNode.listView.forEachItemNode { itemNode in + if let itemNode = itemNode as? ChatMessageItemView { + itemNode.updateHiddenMedia() + } + } + } + })) + + strongSelf.present(gallery, in: .window, with: GalleryControllerPresentationArguments(transitionNode: { [weak self] messageId, media in + if let strongSelf = self { + var transitionNode: ASDisplayNode? + strongSelf.chatDisplayNode.listView.forEachItemNode { itemNode in + if let itemNode = itemNode as? ChatMessageItemView { + if let result = itemNode.transitionNode(id: messageId, media: media) { + transitionNode = result + } + } + } + return transitionNode + } + return nil + })) + } + } + } + }, testNavigateToMessage: { [weak self] fromId, id in + if let strongSelf = self, let historyView = strongSelf.historyView { + var fromIndex: MessageIndex? 
+ + for case let .MessageEntry(message) in historyView.filteredEntries where message.id == fromId { + fromIndex = MessageIndex(message) + break + } + + if let fromIndex = fromIndex { + var found = false + for case let .MessageEntry(message) in historyView.filteredEntries where message.id == id { + found = true + + strongSelf._chatHistoryLocation.set(.single(ChatHistoryLocation.Scroll(index: MessageIndex(message), anchorIndex: MessageIndex(message), sourceIndex: fromIndex, scrollPosition: .Center(.Bottom), animated: true))) + } + + if !found { + strongSelf.messageIndexDisposable.set((strongSelf.account.postbox.messageIndexAtId(id) |> deliverOnMainQueue).start(next: { [weak strongSelf] index in + if let strongSelf = strongSelf, let index = index { + strongSelf._chatHistoryLocation.set(.single(ChatHistoryLocation.Scroll(index:index, anchorIndex: index, sourceIndex: fromIndex, scrollPosition: .Center(.Bottom), animated: true))) + } + })) + } + } + } + }) + + self.controllerInteraction = controllerInteraction + + let messageViewQueue = self.messageViewQueue + + peerDisposable.set((account.postbox.peerWithId(peerId) + |> deliverOnMainQueue).start(next: { [weak self] peer in + if let strongSelf = self { + strongSelf.title = peer.displayTitle + } + })) + + let fixedCombinedReadState = Atomic(value: nil) + + let historyViewUpdate = self.chatHistoryLocation + |> distinctUntilChanged + |> mapToSignal { location in + return messageHistoryViewForLocation(location, account: account, peerId: peerId, fixedCombinedReadState: fixedCombinedReadState.with { $0 }, tagMask: nil) |> beforeNext { viewUpdate in + switch viewUpdate { + case let .HistoryView(view, _, _): + let _ = fixedCombinedReadState.swap(view.combinedReadState) + default: + break + } + } + } + + let previousView = Atomic(value: nil) + + let historyViewTransition = historyViewUpdate |> mapToQueue { [weak self] update -> Signal in + switch update { + case .Loading: + Queue.mainQueue().async { [weak self] in + if let 
strongSelf = self { + if !strongSelf.didSetReady { + strongSelf.didSetReady = true + strongSelf._ready.set(.single(true)) + } + } + } + return .complete() + case let .HistoryView(view, type, scrollPosition): + let reason: ChatHistoryViewTransitionReason + var prepareOnMainQueue = false + switch type { + case let .Initial(fadeIn): + reason = ChatHistoryViewTransitionReason.Initial(fadeIn: fadeIn) + prepareOnMainQueue = !fadeIn + case let .Generic(genericType): + switch genericType { + case .InitialUnread: + reason = ChatHistoryViewTransitionReason.Initial(fadeIn: false) + case .Generic: + reason = ChatHistoryViewTransitionReason.InteractiveChanges + case .UpdateVisible: + reason = ChatHistoryViewTransitionReason.Reload + case let .FillHole(insertions, deletions): + reason = ChatHistoryViewTransitionReason.HoleChanges(filledHoleDirections: insertions, removeHoleDirections: deletions) + } + } + + let processedView = ChatHistoryView(originalView: view, filteredEntries: historyEntriesForView(view)) + let previous = previousView.swap(processedView) + + return preparedHistoryViewTransition(from: previous, to: processedView, reason: reason, account: account, peerId: peerId, controllerInteraction: controllerInteraction, scrollPosition: scrollPosition) |> runOn( prepareOnMainQueue ? Queue.mainQueue() : messageViewQueue) + } + } + + let appliedTransition = historyViewTransition |> deliverOnMainQueue |> mapToQueue { [weak self] transition -> Signal in + if let strongSelf = self { + return strongSelf.enqueueHistoryViewTransition(transition) + } + return .complete() + } + + self.historyDisposable.set(appliedTransition.start()) + + let previousMaxIncomingMessageId = Atomic(value: nil) + let readHistory = combineLatest(self.maxVisibleIncomingMessageId.get(), self.canReadHistory.get()) + |> map { messageId, canRead in + if canRead { + var apply = false + let _ = previousMaxIncomingMessageId.modify { previousId in + if previousId == nil || previousId! 
< messageId { + apply = true + return messageId + } else { + return previousId + } + } + if apply { + let _ = account.postbox.modify({ modifier in + modifier.applyInteractiveReadMaxId(messageId) + }).start() + } + } + } + + self.readHistoryDisposable.set(readHistory.start()) + + if let messageId = messageId { + self._chatHistoryLocation.set(.single(ChatHistoryLocation.InitialSearch(messageId: messageId, count: 60))) + } else { + self._chatHistoryLocation.set(.single(ChatHistoryLocation.Initial(count: 60))) + } + } + + required public init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.historyDisposable.dispose() + self.readHistoryDisposable.dispose() + self.messageIndexDisposable.dispose() + self.galleryHiddenMesageAndMediaDisposable.dispose() + } + + private func setupThemeWithDarkMode(_ darkMode: Bool) { + if darkMode { + self.statusBar.style = .White + self.navigationBar.backgroundColor = UIColor(white: 0.0, alpha: 0.9) + self.navigationBar.foregroundColor = UIColor.white + self.navigationBar.accentColor = UIColor.white + self.navigationBar.stripeColor = UIColor.black + } else { + self.statusBar.style = .Black + self.navigationBar.backgroundColor = UIColor(red: 0.968626451, green: 0.968626451, blue: 0.968626451, alpha: 1.0) + self.navigationBar.foregroundColor = UIColor.black + self.navigationBar.accentColor = UIColor(0x1195f2) + self.navigationBar.stripeColor = UIColor(red: 0.6953125, green: 0.6953125, blue: 0.6953125, alpha: 1.0) + } + } + + var chatDisplayNode: ChatControllerNode { + get { + return super.displayNode as! 
ChatControllerNode + } + } + + override public func loadDisplayNode() { + self.displayNode = ChatControllerNode(account: self.account, peerId: self.peerId) + + self.chatDisplayNode.listView.displayedItemRangeChanged = { [weak self] displayedRange in + if let strongSelf = self { + /*if let transactionTag = strongSelf.listViewTransactionTag { + strongSelf.messageViewQueue.dispatch { + if transactionTag == strongSelf.historyViewTransactionTag { + if let range = range, historyView = strongSelf.historyView, firstEntry = historyView.filteredEntries.first, lastEntry = historyView.filteredEntries.last { + if range.firstIndex < 5 && historyView.originalView.laterId != nil { + strongSelf._chatHistoryLocation.set(.single(ChatHistoryLocation.Navigation(index: lastEntry.index, anchorIndex: historyView.originalView.anchorIndex))) + } else if range.lastIndex >= historyView.filteredEntries.count - 5 && historyView.originalView.earlierId != nil { + strongSelf._chatHistoryLocation.set(.single(ChatHistoryLocation.Navigation(index: firstEntry.index, anchorIndex: historyView.originalView.anchorIndex))) + } else { + //strongSelf.account.postbox.updateMessageHistoryViewVisibleRange(messageView.id, earliestVisibleIndex: viewEntries[viewEntries.count - 1 - range.lastIndex].index, latestVisibleIndex: viewEntries[viewEntries.count - 1 - range.firstIndex].index) + } + } + } + } + }*/ + + if let visible = displayedRange.visibleRange, let historyView = strongSelf.historyView { + if let messageId = maxIncomingMessageIdForEntries(historyView.filteredEntries, indexRange: (historyView.filteredEntries.count - 1 - visible.lastIndex, historyView.filteredEntries.count - 1 - visible.firstIndex)) { + strongSelf.updateMaxVisibleReadIncomingMessageId(messageId) + } + } + } + } + + self.chatDisplayNode.listView.visibleContentOffsetChanged = { [weak self] offset in + if let strongSelf = self { + if let offset = offset, offset < 40.0 { + if strongSelf.chatDisplayNode.navigateToLatestButton.alpha == 1.0 { + 
UIView.animate(withDuration: 0.2, delay: 0.0, options: [.beginFromCurrentState], animations: { + strongSelf.chatDisplayNode.navigateToLatestButton.alpha = 0.0 + }, completion: nil) + } + } else { + if strongSelf.chatDisplayNode.navigateToLatestButton.alpha == 0.0 { + UIView.animate(withDuration: 0.2, delay: 0.0, options: [.beginFromCurrentState], animations: { + strongSelf.chatDisplayNode.navigateToLatestButton.alpha = 1.0 + }, completion: nil) + } + } + } + } + + self.chatDisplayNode.requestLayout = { [weak self] animated in + self?.requestLayout(transition: animated ? .animated(duration: 0.1, curve: .easeInOut) : .immediate) + } + + self.chatDisplayNode.setupSendActionOnViewUpdate = { [weak self] f in + self?.layoutActionOnViewTransition = f + } + + self.chatDisplayNode.displayAttachmentMenu = { [weak self] in + if let strongSelf = self { + let controller = ChatMediaActionSheetController() + controller.location = { [weak strongSelf] in + if let strongSelf = strongSelf { + let mapInputController = MapInputController() + strongSelf.present(mapInputController, in: .window) + } + } + controller.contacts = { [weak strongSelf] in + if let strongSelf = strongSelf { + useDarkMode = !useDarkMode + strongSelf.setupThemeWithDarkMode(useDarkMode) + } + } + strongSelf.present(controller, in: .window) + } + } + + self.chatDisplayNode.navigateToLatestButton.tapped = { [weak self] in + if let strongSelf = self { + strongSelf._chatHistoryLocation.set(.single(ChatHistoryLocation.Scroll(index: MessageIndex.upperBound(peerId: strongSelf.peerId), anchorIndex: MessageIndex.upperBound(peerId: strongSelf.peerId), sourceIndex: MessageIndex.lowerBound(peerId: strongSelf.peerId), scrollPosition: .Top, animated: true))) + } + } + + self.displayNodeDidLoad() + + self.dequeueHistoryViewTransition() + } + + override public func viewWillAppear(_ animated: Bool) { + super.viewWillAppear(animated) + } + + override public func viewDidAppear(_ animated: Bool) { + super.viewDidAppear(animated) + + 
self.chatDisplayNode.listView.preloadPages = true + self.canReadHistory.set(.single(true)) + } + + private func enqueueHistoryViewTransition(_ transition: ChatHistoryViewTransition) -> Signal { + return Signal { [weak self] subscriber in + if let strongSelf = self { + if let _ = strongSelf.enqueuedHistoryViewTransition { + preconditionFailure() + } + + strongSelf.enqueuedHistoryViewTransition = (transition, { + subscriber.putCompletion() + }) + + if strongSelf.isNodeLoaded { + strongSelf.dequeueHistoryViewTransition() + } else { + if !strongSelf.didSetReady { + strongSelf.didSetReady = true + strongSelf._ready.set(.single(true)) + } + } + } else { + subscriber.putCompletion() + } + + return EmptyDisposable + } |> runOn(Queue.mainQueue()) + } + + private func updateMaxVisibleReadIncomingMessageId(_ id: MessageId) { + self.maxVisibleIncomingMessageId.set(.single(id)) + } + + private func dequeueHistoryViewTransition() { + if let (transition, completion) = self.enqueuedHistoryViewTransition { + self.enqueuedHistoryViewTransition = nil + + let completion: (ListViewDisplayedItemRange) -> Void = { [weak self] visibleRange in + if let strongSelf = self { + strongSelf.historyView = transition.historyView + + if let range = visibleRange.loadedRange { + strongSelf.account.postbox.updateMessageHistoryViewVisibleRange(transition.historyView.originalView.id, earliestVisibleIndex: transition.historyView.filteredEntries[transition.historyView.filteredEntries.count - 1 - range.lastIndex].index, latestVisibleIndex: transition.historyView.filteredEntries[transition.historyView.filteredEntries.count - 1 - range.firstIndex].index) + + if let visible = visibleRange.visibleRange { + if let messageId = maxIncomingMessageIdForEntries(transition.historyView.filteredEntries, indexRange: (transition.historyView.filteredEntries.count - 1 - visible.lastIndex, transition.historyView.filteredEntries.count - 1 - visible.firstIndex)) { + strongSelf.updateMaxVisibleReadIncomingMessageId(messageId) 
+ } + } + } + + if !strongSelf.didSetReady { + strongSelf.didSetReady = true + strongSelf._ready.set(.single(true)) + } + + completion() + } + } + + if let layoutActionOnViewTransition = self.layoutActionOnViewTransition { + self.layoutActionOnViewTransition = nil + layoutActionOnViewTransition() + + self.chatDisplayNode.containerLayoutUpdated(self.containerLayout, navigationBarHeight: self.navigationBar.frame.maxY, transition: .animated(duration: 0.5 * 1.3, curve: .spring), listViewTransaction: { updateSizeAndInsets in + var options = transition.options + let _ = options.insert(.Synchronous) + let _ = options.insert(.LowLatency) + options.remove(.AnimateInsertion) + + let deleteItems = transition.deleteItems.map({ item in + return ListViewDeleteItem(index: item.index, directionHint: nil) + }) + + var maxInsertedItem: Int? + var insertItems: [ListViewInsertItem] = [] + for i in 0 ..< transition.insertItems.count { + let item = transition.insertItems[i] + if item.directionHint == .Down && (maxInsertedItem == nil || maxInsertedItem! < item.index) { + maxInsertedItem = item.index + } + insertItems.append(ListViewInsertItem(index: item.index, previousIndex: item.previousIndex, item: item.item, directionHint: item.directionHint == .Down ? .Up : nil)) + } + + let scrollToItem = ListViewScrollToItem(index: 0, position: .Top, animated: true, curve: .Spring(speed: 1.3), directionHint: .Up) + + var stationaryItemRange: (Int, Int)? 
+ if let maxInsertedItem = maxInsertedItem { + stationaryItemRange = (maxInsertedItem + 1, Int.max) + } + + self.chatDisplayNode.listView.deleteAndInsertItems(deleteIndices: deleteItems, insertIndicesAndItems: insertItems, updateIndicesAndItems: transition.updateItems, options: options, scrollToItem: scrollToItem, updateSizeAndInsets: updateSizeAndInsets, stationaryItemRange: stationaryItemRange, completion: completion) + }) + } else { + self.chatDisplayNode.listView.deleteAndInsertItems(deleteIndices: transition.deleteItems, insertIndicesAndItems: transition.insertItems, updateIndicesAndItems: transition.updateItems, options: transition.options, scrollToItem: transition.scrollToItem, stationaryItemRange: transition.stationaryItemRange, completion: completion) + } + } + } + + override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.containerLayout = layout + + self.chatDisplayNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition, listViewTransaction: { updateSizeAndInsets in + self.chatDisplayNode.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous, .LowLatency], scrollToItem: nil, updateSizeAndInsets: updateSizeAndInsets, stationaryItemRange: nil, completion: { _ in }) + }) + } +} diff --git a/TelegramUI/ChatControllerInteraction.swift b/TelegramUI/ChatControllerInteraction.swift new file mode 100644 index 0000000000..f9598eb19f --- /dev/null +++ b/TelegramUI/ChatControllerInteraction.swift @@ -0,0 +1,14 @@ +import Foundation +import Postbox +import AsyncDisplayKit + +public final class ChatControllerInteraction { + let openMessage: (MessageId) -> Void + let testNavigateToMessage: (MessageId, MessageId) -> Void + var hiddenMedia: [MessageId: [Media]] = [:] + + public init(openMessage: @escaping 
(MessageId) -> Void, testNavigateToMessage: @escaping (MessageId, MessageId) -> Void) { + self.openMessage = openMessage + self.testNavigateToMessage = testNavigateToMessage + } +} diff --git a/TelegramUI/ChatControllerNode.swift b/TelegramUI/ChatControllerNode.swift new file mode 100644 index 0000000000..2900ee5364 --- /dev/null +++ b/TelegramUI/ChatControllerNode.swift @@ -0,0 +1,183 @@ +import Foundation +import AsyncDisplayKit +import Postbox +import SwiftSignalKit +import Display +import TelegramCore + +private let backgroundImage = UIImage(bundleImageName: "Chat/Wallpapers/Builtin0") + +enum ChatMessageViewPosition: Equatable { + case AroundUnread(count: Int) + case Around(index: MessageIndex, anchorIndex: MessageIndex) + case Scroll(index: MessageIndex, anchorIndex: MessageIndex, sourceIndex: MessageIndex, scrollPosition: ListViewScrollPosition) +} + +func ==(lhs: ChatMessageViewPosition, rhs: ChatMessageViewPosition) -> Bool { + switch lhs { + case let .Around(lhsId, lhsAnchorIndex): + switch rhs { + case let .Around(rhsId, rhsAnchorIndex) where lhsId == rhsId && lhsAnchorIndex == rhsAnchorIndex: + return true + default: + return false + } + case let .Scroll(lhsIndex, lhsAnchorIndex, lhsSourceIndex, lhsScrollPosition): + switch rhs { + case let .Scroll(rhsIndex, rhsAnchorIndex, rhsSourceIndex, rhsScrollPosition) where lhsIndex == rhsIndex && lhsAnchorIndex == rhsAnchorIndex && lhsSourceIndex == rhsSourceIndex && lhsScrollPosition == rhsScrollPosition: + return true + default: + return false + } + case let .AroundUnread(lhsCount): + switch rhs { + case let .AroundUnread(rhsCount) where lhsCount == rhsCount: + return true + default: + return false + } + } +} + +class ChatControllerNode: ASDisplayNode { + let account: Account + let peerId: PeerId + + let backgroundNode: ASDisplayNode + let listView: ListView + let inputNode: ChatInputNode + let navigateToLatestButton: ChatHistoryNavigationButtonNode + + private var ignoreUpdateHeight = false + + var 
displayAttachmentMenu: () -> Void = { } + var setupSendActionOnViewUpdate: (@escaping () -> Void) -> Void = { _ in } + var requestLayout: (Bool) -> Void = { _ in } + + init(account: Account, peerId: PeerId) { + self.account = account + self.peerId = peerId + + self.backgroundNode = ASDisplayNode() + self.backgroundNode.isLayerBacked = true + self.backgroundNode.contentMode = .scaleAspectFill + self.backgroundNode.displaysAsynchronously = false + self.backgroundNode.clipsToBounds = true + + self.listView = ListView() + self.listView.preloadPages = false + //self.listView.debugInfo = true + self.inputNode = ChatInputNode() + + self.navigateToLatestButton = ChatHistoryNavigationButtonNode() + self.navigateToLatestButton.alpha = 0.0 + + super.init(viewBlock: { + return UITracingLayerView() + }, didLoad: nil) + + self.backgroundColor = UIColor(0xdee3e9) + self.backgroundNode.contents = backgroundImage?.cgImage + self.addSubnode(self.backgroundNode) + + self.listView.transform = CATransform3DMakeRotation(CGFloat(M_PI), 0.0, 0.0, 1.0) + self.addSubnode(self.listView) + + self.addSubnode(self.inputNode) + + self.addSubnode(self.navigateToLatestButton) + + self.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.tapGesture(_:)))) + + self.inputNode.updateHeight = { [weak self] in + if let strongSelf = self, !strongSelf.ignoreUpdateHeight { + strongSelf.requestLayout(true) + } + } + + self.inputNode.sendMessage = { [weak self] in + if let strongSelf = self { + if strongSelf.inputNode.textInputNode?.isFirstResponder() ?? 
false { + applyKeyboardAutocorrection() + } + let text = strongSelf.inputNode.text + + strongSelf.setupSendActionOnViewUpdate({ [weak strongSelf] in + if let strongSelf = strongSelf { + strongSelf.ignoreUpdateHeight = true + strongSelf.inputNode.text = "" + strongSelf.ignoreUpdateHeight = false + } + }) + + let _ = enqueueMessage(account: strongSelf.account, peerId: strongSelf.peerId, text: text).start() + } + } + + self.inputNode.displayAttachmentMenu = { [weak self] in + self?.displayAttachmentMenu() + } + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition, listViewTransaction: (ListViewUpdateSizeAndInsets) -> Void) { + var insets = layout.insets(options: [.input]) + insets.top += navigationBarHeight + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: layout.size.width, height: layout.size.height) + self.listView.position = CGPoint(x: layout.size.width / 2.0, y: layout.size.height / 2.0) + + var duration: Double = 0.0 + var curve: UInt = 0 + switch transition { + case .immediate: + break + case let .animated(animationDuration, animationCurve): + duration = animationDuration + switch animationCurve { + case .easeInOut: + break + case .spring: + curve = 7 + } + } + + let messageTextInputSize = self.inputNode.calculateSizeThatFits(CGSize(width: layout.size.width, height: min(layout.size.height / 2.0, 240.0))) + + self.backgroundNode.frame = CGRect(x: 0.0, y: 0.0, width: layout.size.width, height: layout.size.height) + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: layout.size.width, height: layout.size.height) + self.listView.position = CGPoint(x: layout.size.width / 2.0, y: layout.size.height / 2.0) + + let listViewCurve: ListViewAnimationCurve + var speedFactor: CGFloat = 1.0 + if curve == 7 { + speedFactor = CGFloat(duration) / 0.5 + listViewCurve = .Spring(speed: CGFloat(speedFactor)) + } else { + listViewCurve = .Default + } + + let inputViewFrame = CGRect(x: 0.0, 
y: layout.size.height - messageTextInputSize.height - insets.bottom, width: layout.size.width, height: messageTextInputSize.height) + + listViewTransaction(ListViewUpdateSizeAndInsets(size: layout.size, insets: UIEdgeInsets(top: insets.bottom + inputViewFrame.size.height + 4.0, left: insets.right, bottom: insets.top, right: insets.left), duration: duration, curve: listViewCurve)) + + let navigateToLatestButtonSize = self.navigateToLatestButton.bounds.size + let navigateToLatestButtonFrame = CGRect(origin: CGPoint(x: layout.size.width - navigateToLatestButtonSize.width - 6.0, y: inputViewFrame.minY - navigateToLatestButtonSize.height - 6.0), size: navigateToLatestButtonSize) + + if duration > DBL_EPSILON { + UIView.animate(withDuration: duration / Double(speedFactor), delay: 0.0, options: UIViewAnimationOptions(rawValue: curve << 16), animations: { + self.inputNode.frame = inputViewFrame + self.navigateToLatestButton.frame = navigateToLatestButtonFrame + }, completion: nil) + } else { + self.inputNode.frame = inputViewFrame + self.navigateToLatestButton.frame = navigateToLatestButtonFrame + } + } + + @objc func tapGesture(_ recognizer: UITapGestureRecognizer) { + if recognizer.state == .ended { + self.view.endEditing(true) + } + } +} diff --git a/TelegramUI/ChatDocumentGalleryItem.swift b/TelegramUI/ChatDocumentGalleryItem.swift new file mode 100644 index 0000000000..8e4823462d --- /dev/null +++ b/TelegramUI/ChatDocumentGalleryItem.swift @@ -0,0 +1,138 @@ +import Foundation +import Postbox +import Display +import SwiftSignalKit +import WebKit +import TelegramCore + +class ChatDocumentGalleryItem: GalleryItem { + let account: Account + let message: Message + let location: MessageHistoryEntryLocation? + + init(account: Account, message: Message, location: MessageHistoryEntryLocation?) 
{ + self.account = account + self.message = message + self.location = location + } + + func node() -> GalleryItemNode { + let node = ChatDocumentGalleryItemNode() + + for media in self.message.media { + if let file = media as? TelegramMediaFile { + node.setFile(account: account, file: file) + break + } + } + + if let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + + return node + } + + func updateNode(node: GalleryItemNode) { + if let node = node as? ChatDocumentGalleryItemNode, let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + } +} + +class ChatDocumentGalleryItemNode: GalleryItemNode { + fileprivate let _title = Promise() + + private let webView: UIView + + private var accountAndFile: (Account, TelegramMediaFile)? + private let dataDisposable = MetaDisposable() + + private var isVisible = false + + override init() { + if #available(iOS 9.0, *) { + let webView = WKWebView() + self.webView = webView + } else { + let webView = UIWebView() + webView.scalesPageToFit = true + self.webView = webView + } + + super.init() + + self.view.addSubview(self.webView) + } + + deinit { + self.dataDisposable.dispose() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + + self.webView.frame = CGRect(origin: CGPoint(x: 0.0, y: navigationBarHeight), size: CGSize(width: layout.size.width, height: layout.size.height - navigationBarHeight)) + } + + override func navigationStyle() -> Signal { + return .single(.light) + } + + func setFile(account: Account, file: TelegramMediaFile) { + let updateFile = self.accountAndFile?.1 != file + self.accountAndFile = (account, file) + if updateFile { + self.maybeLoadContent() + } + } + + private func maybeLoadContent() { + if let 
(account, file) = self.accountAndFile { + var pathExtension: String? + if let fileName = file.fileName { + pathExtension = (fileName as NSString).pathExtension + } + let data = account.postbox.mediaBox.resourceData(CloudFileMediaResource(location: file.location, size: file.size), pathExtension: pathExtension, complete: true) + |> deliverOnMainQueue + self.dataDisposable.set(data.start(next: { [weak self] data in + if let strongSelf = self { + if data.size == file.size { + if let webView = strongSelf.webView as? WKWebView { + if #available(iOS 9.0, *) { + webView.loadFileURL(URL(fileURLWithPath: data.path), allowingReadAccessTo: URL(fileURLWithPath: data.path)) + } + } else if let webView = strongSelf.webView as? UIWebView { + webView.loadRequest(URLRequest(url: URL(fileURLWithPath: data.path))) + } + } + } + })) + } + } + + /*private func unloadContent() { + self.dataDisposable.set(nil) + + self.webView.stopLoading() + self.webView.loadHTMLString("", baseURL: nil) + }*/ + + override func visibilityUpdated(isVisible: Bool) { + super.visibilityUpdated(isVisible: isVisible) + + /*if self.isVisible != isVisible { + self.isVisible = isVisible + + if isVisible { + self.maybeLoadContent() + } else { + self.unloadContent() + } + }*/ + } + + override func title() -> Signal { + return self._title.get() + } +} diff --git a/TelegramUI/ChatHistoryEntry.swift b/TelegramUI/ChatHistoryEntry.swift new file mode 100644 index 0000000000..cf639fc938 --- /dev/null +++ b/TelegramUI/ChatHistoryEntry.swift @@ -0,0 +1,74 @@ +import Postbox +import TelegramCore + +enum ChatHistoryEntry: Identifiable, Comparable { + case HoleEntry(MessageHistoryHole) + case MessageEntry(Message) + case UnreadEntry(MessageIndex) + + var stableId: UInt64 { + switch self { + case let .HoleEntry(hole): + return UInt64(hole.stableId) | ((UInt64(1) << 40)) + case let .MessageEntry(message): + return UInt64(message.stableId) | ((UInt64(2) << 40)) + case .UnreadEntry: + return UInt64(3) << 40 + } + } + + var index: 
MessageIndex { + switch self { + case let .HoleEntry(hole): + return hole.maxIndex + case let .MessageEntry(message): + return MessageIndex(message) + case let .UnreadEntry(index): + return index + } + } +} + +func ==(lhs: ChatHistoryEntry, rhs: ChatHistoryEntry) -> Bool { + switch lhs { + case let .HoleEntry(lhsHole): + switch rhs { + case let .HoleEntry(rhsHole) where lhsHole == rhsHole: + return true + default: + return false + } + case let .MessageEntry(lhsMessage): + switch rhs { + case let .MessageEntry(rhsMessage) where MessageIndex(lhsMessage) == MessageIndex(rhsMessage) && lhsMessage.flags == rhsMessage.flags: + if lhsMessage.media.count != rhsMessage.media.count { + return false + } + for i in 0 ..< lhsMessage.media.count { + if !lhsMessage.media[i].isEqual(rhsMessage.media[i]) { + return false + } + } + return true + default: + return false + } + case let .UnreadEntry(lhsIndex): + switch rhs { + case let .UnreadEntry(rhsIndex) where lhsIndex == rhsIndex: + return true + default: + return false + } + } +} + +func <(lhs: ChatHistoryEntry, rhs: ChatHistoryEntry) -> Bool { + let lhsIndex = lhs.index + let rhsIndex = rhs.index + if lhsIndex == rhsIndex { + return lhs.stableId < rhs.stableId + } else { + return lhsIndex < rhsIndex + } +} diff --git a/TelegramUI/ChatHistoryLocation.swift b/TelegramUI/ChatHistoryLocation.swift new file mode 100644 index 0000000000..8682418630 --- /dev/null +++ b/TelegramUI/ChatHistoryLocation.swift @@ -0,0 +1,23 @@ +import Postbox +import Display + +enum ChatHistoryLocation: Equatable { + case Initial(count: Int) + case InitialSearch(messageId: MessageId, count: Int) + case Navigation(index: MessageIndex, anchorIndex: MessageIndex) + case Scroll(index: MessageIndex, anchorIndex: MessageIndex, sourceIndex: MessageIndex, scrollPosition: ListViewScrollPosition, animated: Bool) +} + +func ==(lhs: ChatHistoryLocation, rhs: ChatHistoryLocation) -> Bool { + switch lhs { + case let .Navigation(lhsIndex, lhsAnchorIndex): + switch rhs { + 
case let .Navigation(rhsIndex, rhsAnchorIndex) where lhsIndex == rhsIndex && lhsAnchorIndex == rhsAnchorIndex: + return true + default: + return false + } + default: + return false + } +} diff --git a/TelegramUI/ChatHistoryNavigationButtonNode.swift b/TelegramUI/ChatHistoryNavigationButtonNode.swift new file mode 100644 index 0000000000..573f7664c2 --- /dev/null +++ b/TelegramUI/ChatHistoryNavigationButtonNode.swift @@ -0,0 +1,52 @@ +import Foundation +import AsyncDisplayKit +import Display + +private func generateBackgroundImage() -> UIImage? { + return generateImage(CGSize(width: 38.0, height: 38.0), contextGenerator: { size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor.white.cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(x: 0.5, y: 0.5), size: CGSize(width: size.width - 1.0, height: size.height - 1.0))) + context.setLineWidth(0.5) + context.setStrokeColor(UIColor(0x000000, 0.15).cgColor) + context.strokeEllipse(in: CGRect(origin: CGPoint(x: 0.25, y: 0.25), size: CGSize(width: size.width - 0.5, height: size.height - 0.5))) + context.setStrokeColor(UIColor(0x88888D).cgColor) + context.setLineWidth(1.5) + + let position = CGPoint(x: 9.0 - 0.5, y: 23.0) + context.move(to: CGPoint(x: position.x + 1.0, y: position.y - 1.0)) + context.addLine(to: CGPoint(x: position.x + 10.0, y: position.y - 10.0)) + context.addLine(to: CGPoint(x: position.x + 19.0, y: position.y - 1.0)) + context.strokePath() + }) +} + +private let backgroundImage = generateBackgroundImage() + +class ChatHistoryNavigationButtonNode: ASControlNode { + private let imageNode: ASImageNode + + var tapped: (() -> Void)? 
+ + override init() { + self.imageNode = ASImageNode() + self.imageNode.displayWithoutProcessing = true + self.imageNode.image = backgroundImage + self.imageNode.isLayerBacked = true + + super.init() + + self.addSubnode(self.imageNode) + self.imageNode.frame = CGRect(origin: CGPoint(), size: CGSize(width: 38.0, height: 38.0)) + + self.frame = CGRect(origin: CGPoint(), size: CGSize(width: 38.0, height: 38.0)) + + self.addTarget(self, action: #selector(onTap), forControlEvents: .touchUpInside) + } + + @objc func onTap() { + if let tapped = self.tapped { + tapped() + } + } +} diff --git a/TelegramUI/ChatHoleGalleryItem.swift b/TelegramUI/ChatHoleGalleryItem.swift new file mode 100644 index 0000000000..867fdf45e5 --- /dev/null +++ b/TelegramUI/ChatHoleGalleryItem.swift @@ -0,0 +1,21 @@ +import Foundation +import Display +import AsyncDisplayKit + +final class ChatHoleGalleryItem: GalleryItem { + func node() -> GalleryItemNode { + return ChatHoleGalleryItemNode() + } + + func updateNode(node: GalleryItemNode) { + + } +} + +final class ChatHoleGalleryItemNode: GalleryItemNode { + override init() { + super.init() + + self.backgroundColor = UIColor.blue + } +} diff --git a/TelegramUI/ChatHoleItem.swift b/TelegramUI/ChatHoleItem.swift new file mode 100644 index 0000000000..a71a6ee03d --- /dev/null +++ b/TelegramUI/ChatHoleItem.swift @@ -0,0 +1,75 @@ +import Foundation +import UIKit +import Postbox +import AsyncDisplayKit +import Display + +private func backgroundImage(color: UIColor) -> UIImage? 
{ + return generateImage(CGSize(width: 20.0, height: 20.0), contextGenerator: { size, context -> Void in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(0x748391, 0.45).cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: size)) + })?.stretchableImage(withLeftCapWidth: 8, topCapHeight: 8) +} + +private let titleFont = UIFont.systemFont(ofSize: 13.0) + +class ChatHoleItem: ListViewItem { + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + + async { + let node = ChatHoleItemNode() + node.layoutForWidth(width, item: self, previousItem: previousItem, nextItem: nextItem) + completion(node, {}) + } + } +} + +class ChatHoleItemNode: ListViewItemNode { + let backgroundNode: ASImageNode + let labelNode: TextNode + + init() { + self.backgroundNode = ASImageNode() + self.backgroundNode.isLayerBacked = true + self.backgroundNode.displayWithoutProcessing = true + self.backgroundNode.displaysAsynchronously = false + + self.labelNode = TextNode() + self.labelNode.isLayerBacked = true + + super.init(layerBacked: true) + + self.backgroundNode.image = backgroundImage(color: UIColor.blue) + self.addSubnode(self.backgroundNode) + + self.addSubnode(self.labelNode) + + self.transform = CATransform3DMakeRotation(CGFloat(M_PI), 0.0, 0.0, 1.0) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) 
{ + let (layout, apply) = self.asyncLayout()(width) + apply() + self.contentSize = layout.contentSize + self.insets = layout.insets + } + + func asyncLayout() -> (_ width: CGFloat) -> (ListViewItemNodeLayout, () -> Void) { + let labelLayout = TextNode.asyncLayout(self.labelNode) + return { width in + let (size, apply) = labelLayout(NSAttributedString(string: "Loading", font: titleFont, textColor: UIColor.white), nil, 1, .end, CGSize(width: width, height: CGFloat.greatestFiniteMagnitude), nil) + + let backgroundSize = CGSize(width: size.size.width + 8.0 + 8.0, height: 20.0) + + return (ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 20.0), insets: UIEdgeInsets(top: 4.0, left: 0.0, bottom: 4.0, right: 0.0)), { [weak self] in + if let strongSelf = self { + let _ = apply() + + strongSelf.backgroundNode.frame = CGRect(origin: CGPoint(x: floorToScreenPixels((width - backgroundSize.width) / 2.0), y: 0.0), size: backgroundSize) + strongSelf.labelNode.frame = CGRect(origin: CGPoint(x: strongSelf.backgroundNode.frame.origin.x + 8.0, y: floorToScreenPixels((backgroundSize.height - size.size.height) / 2.0) - 1.0), size: size.size) + } + }) + } + } +} diff --git a/TelegramUI/ChatImageGalleryItem.swift b/TelegramUI/ChatImageGalleryItem.swift new file mode 100644 index 0000000000..e6f06e360e --- /dev/null +++ b/TelegramUI/ChatImageGalleryItem.swift @@ -0,0 +1,214 @@ +import Foundation +import Display +import AsyncDisplayKit +import SwiftSignalKit +import Postbox +import TelegramCore + +class ChatImageGalleryItem: GalleryItem { + let account: Account + let message: Message + let location: MessageHistoryEntryLocation? + + init(account: Account, message: Message, location: MessageHistoryEntryLocation?) { + self.account = account + self.message = message + self.location = location + } + + func node() -> GalleryItemNode { + let node = ChatImageGalleryItemNode() + + for media in self.message.media { + if let image = media as? 
TelegramMediaImage { + node.setImage(account: account, image: image) + break + } else if let file = media as? TelegramMediaFile, file.mimeType.hasPrefix("image/") { + node.setFile(account: account, file: file) + break + } + } + + if let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + + return node + } + + func updateNode(node: GalleryItemNode) { + if let node = node as? ChatImageGalleryItemNode, let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + } +} + +final class ChatImageGalleryItemNode: ZoomableContentGalleryItemNode { + private let imageNode: TransformImageNode + fileprivate let _ready = Promise() + fileprivate let _title = Promise() + + private var accountAndMedia: (Account, Media)? + + private var fetchDisposable = MetaDisposable() + + override init() { + self.imageNode = TransformImageNode() + + super.init() + + self.imageNode.imageUpdated = { [weak self] in + self?._ready.set(.single(Void())) + } + + self.imageNode.view.contentMode = .scaleAspectFill + self.imageNode.clipsToBounds = true + + /*self.imageNode.layer.shadowRadius = 80.0 + self.imageNode.layer.shadowColor = UIColor(white: 0.0, alpha: 1.0).cgColor + self.imageNode.layer.shadowOffset = CGSize(width: 0.0, height: 40.0) + self.imageNode.layer.shadowOpacity = 0.5*/ + } + + deinit { + self.fetchDisposable.dispose() + } + + override func ready() -> Signal { + return self._ready.get() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + } + + fileprivate func setImage(account: Account, image: TelegramMediaImage) { + if self.accountAndMedia == nil || !self.accountAndMedia!.1.isEqual(image) { + if let largestSize = largestRepresentationForPhoto(image) { + let displaySize = 
largestSize.dimensions.dividedByScreenScale() + self.imageNode.alphaTransitionOnFirstUpdate = false + self.imageNode.asyncLayout()(TransformImageArguments(corners: ImageCorners(), imageSize: displaySize, boundingSize: displaySize, intrinsicInsets: UIEdgeInsets()))() + self.imageNode.setSignal(account: account, signal: chatMessagePhoto(account: account, photo: image), dispatchOnDisplayLink: false) + self.zoomableContent = (largestSize.dimensions, self.imageNode) + + self.fetchDisposable.set(account.postbox.mediaBox.fetchedResource(CloudFileMediaResource(location: largestSize.location, size: largestSize.size ?? 0)).start()) + } else { + self._ready.set(.single(Void())) + } + } + self.accountAndMedia = (account, image) + } + + func setFile(account: Account, file: TelegramMediaFile) { + if self.accountAndMedia == nil || !self.accountAndMedia!.1.isEqual(file) { + if let largestSize = file.dimensions { + self.imageNode.alphaTransitionOnFirstUpdate = false + let displaySize = largestSize.dividedByScreenScale() + self.imageNode.asyncLayout()(TransformImageArguments(corners: ImageCorners(), imageSize: displaySize, boundingSize: displaySize, intrinsicInsets: UIEdgeInsets()))() + self.imageNode.setSignal(account: account, signal: chatMessageImageFile(account: account, file: file, progressive: true), dispatchOnDisplayLink: false) + self.zoomableContent = (largestSize, self.imageNode) + } else { + self._ready.set(.single(Void())) + } + } + self.accountAndMedia = (account, file) + } + + override func animateIn(from node: ASDisplayNode) { + var transformedFrame = node.view.convert(node.view.bounds, to: self.imageNode.view) + let transformedSuperFrame = node.view.convert(node.view.bounds, to: self.imageNode.view.superview) + let transformedSelfFrame = node.view.convert(node.view.bounds, to: self.view) + let transformedCopyViewFinalFrame = self.imageNode.view.convert(self.imageNode.view.bounds, to: self.view) + + /*let image = generateImage(node.view.bounds.size, contextGenerator: 
{ size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.translate(x: size.width / 2.0, y: size.height / 2.0) + context.scale(x: 1.0, y: -1.0) + context.translate(x: -size.width / 2.0, y: -size.height / 2.0) + //node.view.drawHierarchy(in: CGRect(origin: CGPoint(), size: size), afterScreenUpdates: false) + node.layer.render(in: context) + })*/ + + //let copyView = UIImageView(image: image) + let copyView = node.view.snapshotContentTree()! + + self.view.insertSubview(copyView, belowSubview: self.scrollView) + copyView.frame = transformedSelfFrame + + copyView.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.25, removeOnCompletion: false, completion: { [weak copyView] _ in + copyView?.removeFromSuperview() + }) + + //copyView.layer.animateFrame(from: transformedSelfFrame, to: transformedCopyViewFinalFrame, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false) + copyView.layer.animatePosition(from: CGPoint(x: transformedSelfFrame.midX, y: transformedSelfFrame.midY), to: CGPoint(x: transformedCopyViewFinalFrame.midX, y: transformedCopyViewFinalFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false) + let scale = CGSize(width: transformedCopyViewFinalFrame.size.width / transformedSelfFrame.size.width, height: transformedCopyViewFinalFrame.size.height / transformedSelfFrame.size.height) + copyView.layer.animate(from: NSValue(caTransform3D: CATransform3DIdentity), to: NSValue(caTransform3D: CATransform3DMakeScale(scale.width, scale.height, 1.0)), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25, removeOnCompletion: false) + + self.imageNode.layer.animatePosition(from: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), to: self.imageNode.layer.position, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring) + self.imageNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.1) + + transformedFrame.origin 
= CGPoint() + self.imageNode.layer.animateBounds(from: transformedFrame, to: self.imageNode.layer.bounds, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring) + } + + override func animateOut(to node: ASDisplayNode, completion: @escaping () -> Void) { + var transformedFrame = node.view.convert(node.view.bounds, to: self.imageNode.view) + let transformedSuperFrame = node.view.convert(node.view.bounds, to: self.imageNode.view.superview) + let transformedSelfFrame = node.view.convert(node.view.bounds, to: self.view) + let transformedCopyViewInitialFrame = self.imageNode.view.convert(self.imageNode.view.bounds, to: self.view) + + var positionCompleted = false + var boundsCompleted = false + var copyCompleted = false + + let copyView = node.view.snapshotContentTree()! + + self.view.insertSubview(copyView, belowSubview: self.scrollView) + copyView.frame = transformedSelfFrame + + let intermediateCompletion = { [weak copyView] in + if positionCompleted && boundsCompleted && copyCompleted { + copyView?.removeFromSuperview() + completion() + } + } + + copyView.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.1, removeOnCompletion: false) + + copyView.layer.animatePosition(from: CGPoint(x: transformedCopyViewInitialFrame.midX, y: transformedCopyViewInitialFrame.midY), to: CGPoint(x: transformedSelfFrame.midX, y: transformedSelfFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false) + let scale = CGSize(width: transformedCopyViewInitialFrame.size.width / transformedSelfFrame.size.width, height: transformedCopyViewInitialFrame.size.height / transformedSelfFrame.size.height) + copyView.layer.animate(from: NSValue(caTransform3D: CATransform3DMakeScale(scale.width, scale.height, 1.0)), to: NSValue(caTransform3D: CATransform3DIdentity), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25, removeOnCompletion: false, completion: { _ in + copyCompleted = true + intermediateCompletion() + }) + + 
self.imageNode.layer.animatePosition(from: self.imageNode.layer.position, to: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false, completion: { _ in + positionCompleted = true + intermediateCompletion() + }) + + self.imageNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.25, removeOnCompletion: false) + + transformedFrame.origin = CGPoint() + self.imageNode.layer.animateBounds(from: self.imageNode.layer.bounds, to: transformedFrame, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false, completion: { _ in + boundsCompleted = true + intermediateCompletion() + }) + } + + override func visibilityUpdated(isVisible: Bool) { + super.visibilityUpdated(isVisible: isVisible) + + if let (account, media) = self.accountAndMedia, let file = media as? TelegramMediaFile { + if isVisible { + self.fetchDisposable.set(account.postbox.mediaBox.fetchedResource(CloudFileMediaResource(location: file.location, size: file.size)).start()) + } else { + self.fetchDisposable.set(nil) + } + } + } + + override func title() -> Signal { + return self._title.get() + } +} diff --git a/TelegramUI/ChatInputView.swift b/TelegramUI/ChatInputView.swift new file mode 100644 index 0000000000..df3dbc2eeb --- /dev/null +++ b/TelegramUI/ChatInputView.swift @@ -0,0 +1,199 @@ +import Foundation +import UIKit +import Display +import AsyncDisplayKit +import WebKit + +private let textInputViewBackground: UIImage = { + let diameter: CGFloat = 10.0 + UIGraphicsBeginImageContextWithOptions(CGSize(width: diameter, height: diameter), true, 0.0) + let context = UIGraphicsGetCurrentContext()! 
+ context.setFillColor(UIColor(0xfafafa).cgColor) + context.fill(CGRect(x: 0.0, y: 0.0, width: diameter, height: diameter)) + context.setFillColor(UIColor.white.cgColor) + context.fillEllipse(in: CGRect(x: 0.0, y: 0.0, width: diameter, height: diameter)) + context.setStrokeColor(UIColor(0xc7c7cc).cgColor) + let strokeWidth: CGFloat = 0.5 + context.setLineWidth(strokeWidth) + context.strokeEllipse(in: CGRect(x: strokeWidth / 2.0, y: strokeWidth / 2.0, width: diameter - strokeWidth, height: diameter - strokeWidth)) + let image = UIGraphicsGetImageFromCurrentImageContext()!.stretchableImage(withLeftCapWidth: Int(diameter / 2.0), topCapHeight: Int(diameter / 2.0)) + UIGraphicsEndImageContext() + + return image +}() + +private let attachmentIcon = UIImage(bundleImageName: "Chat/Input/Text/IconAttachment")?.precomposed() + +class ChatInputNode: ASDisplayNode, ASEditableTextNodeDelegate { + var textPlaceholderNode: TextNode + var textInputNode: ASEditableTextNode? + + let textInputBackgroundView: UIImageView + let sendButton: UIButton + let attachmentButton: UIButton + + var displayAttachmentMenu: () -> Void = { } + var sendMessage: () -> Void = { } + var updateHeight: () -> Void = { } + + var text: String { + get { + return self.textInputNode?.attributedText?.string ?? 
"" + } set(value) { + if let textInputNode = self.textInputNode { + textInputNode.attributedText = NSAttributedString(string: value, font: Font.regular(16.0), textColor: UIColor.black) + self.editableTextNodeDidUpdateText(textInputNode) + } + } + } + + let textFieldInsets = UIEdgeInsets(top: 9.0, left: 41.0, bottom: 8.0, right: 0.0) + let textInputViewInternalInsets = UIEdgeInsets(top: 4.0, left: 5.0, bottom: 4.0, right: 5.0) + + override init() { + self.textInputBackgroundView = UIImageView(image: textInputViewBackground) + self.textPlaceholderNode = TextNode() + self.attachmentButton = UIButton() + self.sendButton = UIButton() + + super.init() + + self.backgroundColor = UIColor(0xfafafa) + + self.attachmentButton.setImage(attachmentIcon, for: []) + self.attachmentButton.addTarget(self, action: #selector(self.attachmentButtonPressed), for: .touchUpInside) + self.view.addSubview(self.attachmentButton) + + self.sendButton.titleLabel?.font = Font.medium(17.0) + self.sendButton.contentEdgeInsets = UIEdgeInsets(top: 8.0, left: 6.0, bottom: 8.0, right: 6.0) + self.sendButton.setTitleColor(UIColor.blue, for: []) + self.sendButton.setTitleColor(UIColor.gray, for: [.highlighted]) + self.sendButton.setTitle("Send", for: []) + self.sendButton.sizeToFit() + self.sendButton.addTarget(self, action: #selector(self.sendButtonPressed), for: .touchUpInside) + + self.view.addSubview(self.textInputBackgroundView) + + let placeholderLayout = TextNode.asyncLayout(self.textPlaceholderNode) + let (placeholderSize, placeholderApply) = placeholderLayout(NSAttributedString(string: "Message", font: Font.regular(16.0), textColor: UIColor(0xbebec0)), nil, 1, .end, CGSize(width: 320.0, height: CGFloat.greatestFiniteMagnitude), nil) + self.textPlaceholderNode.frame = CGRect(origin: CGPoint(), size: placeholderSize.size) + let _ = placeholderApply() + self.addSubnode(self.textPlaceholderNode) + + self.view.addSubview(self.sendButton) + + self.textInputBackgroundView.clipsToBounds = true + 
self.textInputBackgroundView.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.textInputBackgroundViewTap(_:)))) + self.textInputBackgroundView.isUserInteractionEnabled = true + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + private func loadTextInputNode() { + let textInputNode = ASEditableTextNode() + textInputNode.typingAttributes = [NSFontAttributeName: Font.regular(16.0)] + textInputNode.clipsToBounds = true + textInputNode.delegate = self + self.addSubnode(textInputNode) + self.textInputNode = textInputNode + + let sendButtonSize = self.sendButton.bounds.size + + textInputNode.frame = CGRect(x: self.textFieldInsets.left + self.textInputViewInternalInsets.left, y: self.textFieldInsets.top + self.textInputViewInternalInsets.top, width: self.frame.size.width - self.textFieldInsets.left - self.textFieldInsets.right - sendButtonSize.width - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right, height: self.frame.size.height - self.textFieldInsets.top - self.textFieldInsets.bottom - self.textInputViewInternalInsets.top - self.textInputViewInternalInsets.bottom) + + self.textInputBackgroundView.isUserInteractionEnabled = false + self.textInputBackgroundView.removeGestureRecognizer(self.textInputBackgroundView.gestureRecognizers![0]) + } + + override func calculateSizeThatFits(_ constrainedSize: CGSize) -> CGSize { + let sendButtonSize = self.sendButton.bounds.size + let textFieldHeight: CGFloat + if let textInputNode = self.textInputNode { + textFieldHeight = min(115.0, max(20.0, ceil(textInputNode.measure(CGSize(width: constrainedSize.width - self.textFieldInsets.left - self.textFieldInsets.right - sendButtonSize.width - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right, height: constrainedSize.height)).height))) + } else { + textFieldHeight = 20.0 + } + + return CGSize(width: constrainedSize.width, height: 
textFieldHeight + self.textFieldInsets.top + self.textFieldInsets.bottom + self.textInputViewInternalInsets.top + self.textInputViewInternalInsets.bottom) + } + + override var frame: CGRect { + get { + return super.frame + } set(value) { + super.frame = value + + let sendButtonSize = self.sendButton.bounds.size + let minimalHeight: CGFloat = 45.0 + self.sendButton.frame = CGRect(x: value.size.width - sendButtonSize.width, y: value.height - minimalHeight + floor((minimalHeight - sendButtonSize.height) / 2.0), width: sendButtonSize.width, height: sendButtonSize.height) + + self.attachmentButton.frame = CGRect(origin: CGPoint(x: 0.0, y: value.height - minimalHeight), size: CGSize(width: 40.0, height: minimalHeight)) + + self.textInputNode?.frame = CGRect(x: self.textFieldInsets.left + self.textInputViewInternalInsets.left, y: self.textFieldInsets.top + self.textInputViewInternalInsets.top, width: value.size.width - self.textFieldInsets.left - self.textFieldInsets.right - sendButtonSize.width - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right, height: value.size.height - self.textFieldInsets.top - self.textFieldInsets.bottom - self.textInputViewInternalInsets.top - self.textInputViewInternalInsets.bottom) + + self.textPlaceholderNode.frame = CGRect(origin: CGPoint(x: self.textFieldInsets.left + self.textInputViewInternalInsets.left, y: self.textFieldInsets.top + self.textInputViewInternalInsets.top + 0.5), size: self.textPlaceholderNode.frame.size) + + self.textInputBackgroundView.frame = CGRect(x: self.textFieldInsets.left, y: self.textFieldInsets.top, width: value.size.width - self.textFieldInsets.left - self.textFieldInsets.right - sendButtonSize.width, height: value.size.height - self.textFieldInsets.top - self.textFieldInsets.bottom) + } + } + + @objc func editableTextNodeDidUpdateText(_ editableTextNode: ASEditableTextNode) { + if let textInputNode = self.textInputNode { + self.textPlaceholderNode.isHidden = 
editableTextNode.attributedText?.length ?? 0 != 0 + + let constrainedSize = CGSize(width: self.frame.size.width, height: CGFloat.greatestFiniteMagnitude) + let sendButtonSize = self.sendButton.bounds.size + + let textFieldHeight: CGFloat = min(115.0, max(20.0, ceil(textInputNode.measure(CGSize(width: constrainedSize.width - self.textFieldInsets.left - self.textFieldInsets.right - sendButtonSize.width - self.textInputViewInternalInsets.left - self.textInputViewInternalInsets.right, height: constrainedSize.height)).height))) + if abs(textFieldHeight - textInputNode.frame.size.height) > CGFloat(FLT_EPSILON) { + self.invalidateCalculatedLayout() + self.updateHeight() + } + } + } + + @objc func sendButtonPressed() { + let text = self.textInputNode?.attributedText?.string ?? "" + if !text.isEmpty { + self.sendMessage() + } + } + + @objc func attachmentButtonPressed() { + self.displayAttachmentMenu() + } + + @objc func textInputBackgroundViewTap(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + if self.textInputNode == nil { + self.loadTextInputNode() + } + + self.textInputNode?.becomeFirstResponder() + } + } + + func animateTextSend() { + /*if let textInputNode = self.textInputNode { + let snapshot = textInputNode.view.snapshotViewAfterScreenUpdates(false) + snapshot.frame = self.textInputBackgroundView.convertRect(textInputNode.view.bounds, fromView: textInputNode.view) + self.textInputBackgroundView.addSubview(snapshot) + UIView.animateWithDuration(0.3, animations: { + snapshot.alpha = 0.0 + snapshot.transform = CGAffineTransformMakeTranslation(0.0, -20.0) + }, completion: { _ in + snapshot.removeFromSuperview() + }) + }*/ + } + + /*override func hitTest(point: CGPoint, withEvent event: UIEvent!) -> UIView! 
{ + if let textInputNode = self.textInputNode where self.textInputBackgroundView.frame.contains(point) { + return textInputNode.view + } + + return super.hitTest(point, withEvent: event) + }*/ +} diff --git a/TelegramUI/ChatListAvatarNode.swift b/TelegramUI/ChatListAvatarNode.swift new file mode 100644 index 0000000000..7cbdd2a09a --- /dev/null +++ b/TelegramUI/ChatListAvatarNode.swift @@ -0,0 +1,177 @@ +import Foundation +import AsyncDisplayKit +import Postbox +import UIKit +import Display +import TelegramCore + +private class ChatListAvatarNodeParameters: NSObject { + let account: Account + let peerId: PeerId + let letters: [String] + let font: UIFont + + init(account: Account, peerId: PeerId, letters: [String], font: UIFont) { + self.account = account + self.peerId = peerId + self.letters = letters + self.font = font + + super.init() + } +} + +let gradientColors: [NSArray] = [ + [UIColor(0xff516a).cgColor, UIColor(0xff885e).cgColor], + [UIColor(0xffa85c).cgColor, UIColor(0xffcd6a).cgColor], + [UIColor(0x54cb68).cgColor, UIColor(0xa0de7e).cgColor], + [UIColor(0x2a9ef1).cgColor, UIColor(0x72d5fd).cgColor], + [UIColor(0x665fff).cgColor, UIColor(0x82b1ff).cgColor], + [UIColor(0xd669ed).cgColor, UIColor(0xe0a2f3).cgColor] +] + +private enum ChatListAvatarNodeState: Equatable { + case Empty + case PeerAvatar(Peer) +} + +private func ==(lhs: ChatListAvatarNodeState, rhs: ChatListAvatarNodeState) -> Bool { + switch (lhs, rhs) { + case (.Empty, .Empty): + return true + case let (.PeerAvatar(lhsPeer), .PeerAvatar(rhsPeer)) where lhsPeer.isEqual(rhsPeer): + return true + default: + return false + } +} + +public final class ChatListAvatarNode: ASDisplayNode { + let font: UIFont + private var parameters: ChatListAvatarNodeParameters? 
+ let imageNode: ImageNode + + private var state: ChatListAvatarNodeState = .Empty + + public init(font: UIFont) { + self.font = font + self.imageNode = ImageNode() + + super.init() + + self.isOpaque = false + self.displaysAsynchronously = true + + self.imageNode.isLayerBacked = true + self.addSubnode(self.imageNode) + } + + override public var frame: CGRect { + get { + return super.frame + } set(value) { + super.frame = value + self.imageNode.frame = CGRect(origin: CGPoint(), size: value.size) + } + } + + public func setPeer(account: Account, peer: Peer) { + let updatedState = ChatListAvatarNodeState.PeerAvatar(peer) + if updatedState != self.state { + self.state = updatedState + + let parameters = ChatListAvatarNodeParameters(account: account, peerId: peer.id, letters: peer.displayLetters, font: self.font) + + self.displaySuspended = true + self.contents = nil + + if let signal = peerAvatarImage(account: account, peer: peer) { + self.imageNode.setSignal(signal) + } else { + self.displaySuspended = false + } + if self.parameters == nil || self.parameters != parameters { + self.parameters = parameters + self.setNeedsDisplay() + } + } + } + + override public func drawParameters(forAsyncLayer layer: _ASDisplayLayer) -> NSObjectProtocol { + return parameters ?? NSObject() + } + + @objc override public class func draw(_ bounds: CGRect, withParameters parameters: NSObjectProtocol!, isCancelled: asdisplaynode_iscancelled_block_t, isRasterizing: Bool) { + assertNotOnMainThread() + + let context = UIGraphicsGetCurrentContext()! + + if !isRasterizing { + context.setBlendMode(.copy) + context.setFillColor(UIColor.clear.cgColor) + context.fill(bounds) + } + + context.beginPath() + context.addEllipse(in: CGRect(x: 0.0, y: 0.0, width: bounds.size.width, height: + bounds.size.height)) + context.clip() + + let colorIndex: Int + if let parameters = parameters as? 
ChatListAvatarNodeParameters { + colorIndex = Int(parameters.account.peerId.id + parameters.peerId.id) + } else { + colorIndex = 0 + } + + let colorsArray: NSArray = gradientColors[colorIndex % gradientColors.count] + + var locations: [CGFloat] = [1.0, 0.2]; + + let colorSpace = CGColorSpaceCreateDeviceRGB() + let gradient = CGGradient(colorsSpace: colorSpace, colors: colorsArray, locations: &locations)! + + context.drawLinearGradient(gradient, start: CGPoint(), end: CGPoint(x: 0.0, y: bounds.size.height), options: CGGradientDrawingOptions()) + + //CGContextDrawRadialGradient(context, gradient, CGPoint(x: bounds.size.width * 0.5, y: -bounds.size.width * 0.2), 0.0, CGPoint(x: bounds.midX, y: bounds.midY), bounds.width, CGGradientDrawingOptions()) + + context.setBlendMode(.normal) + + if let parameters = parameters as? ChatListAvatarNodeParameters { + let letters = parameters.letters + let string = letters.count == 0 ? "" : (letters[0] + (letters.count == 1 ? "" : letters[1])) + let attributedString = NSAttributedString(string: string, attributes: [NSFontAttributeName: parameters.font, NSForegroundColorAttributeName: UIColor.white]) + + let line = CTLineCreateWithAttributedString(attributedString) + let lineBounds = CTLineGetBoundsWithOptions(line, .useGlyphPathBounds) + + /*var ascent: CGFloat = 0.0 + var descent: CGFloat = 0.0 + var leading: CGFloat = 0.0 + let lineWidth = CGFloat(CTLineGetTypographicBounds(line, &ascent, &descent, &leading)) + let opticalBounds = CGRect(origin: CGPoint(), size: CGSize(width: lineWidth, height: ascent + descent + leading))*/ + + //let opticalBounds = CTLineGetImageBounds(line, context) + + let lineOffset = CGPoint(x: string == "B" ? 
1.0 : 0.0, y: 0.0) + let lineOrigin = CGPoint(x: floorToScreenPixels(-lineBounds.origin.x + (bounds.size.width - lineBounds.size.width) / 2.0) + lineOffset.x, y: floorToScreenPixels(-lineBounds.origin.y + (bounds.size.height - lineBounds.size.height) / 2.0)) + + //let lineOrigin = CGPoint(x: floorToScreenPixels(-opticalBounds.origin.x + (bounds.size.width - opticalBounds.size.width) / 2.0), y: floorToScreenPixels(-opticalBounds.origin.y + (bounds.size.height - opticalBounds.size.height) / 2.0)) + + context.translateBy(x: bounds.size.width / 2.0, y: bounds.size.height / 2.0) + context.scaleBy(x: 1.0, y: -1.0) + context.translateBy(x: -bounds.size.width / 2.0, y: -bounds.size.height / 2.0) + + context.translateBy(x: lineOrigin.x, y: lineOrigin.y) + CTLineDraw(line, context) + context.translateBy(x: -lineOrigin.x, y: -lineOrigin.y) + + /*var attributes: [String : AnyObject] = [:] + attributes[NSFontAttributeName] = parameters.font + attributes[NSForegroundColorAttributeName] = UIColor.whiteColor() + let lettersSize = string.sizeWithAttributes(attributes) + + string.drawAtPoint(CGPoint(x: floor((bounds.size.width - lettersSize.width) / 2.0), y: floor((bounds.size.height - lettersSize.height) / 2.0)), withAttributes: attributes)*/ + } + } +} diff --git a/TelegramUI/ChatListController.swift b/TelegramUI/ChatListController.swift new file mode 100644 index 0000000000..567d13dc42 --- /dev/null +++ b/TelegramUI/ChatListController.swift @@ -0,0 +1,499 @@ +import UIKit +import Postbox +import SwiftSignalKit +import Display +import TelegramCore + +enum ChatListMessageViewPosition: Equatable { + case Tail(count: Int) + case Around(index: MessageIndex, anchorIndex: MessageIndex, scrollPosition: ListViewScrollPosition?) 
+} + +func ==(lhs: ChatListMessageViewPosition, rhs: ChatListMessageViewPosition) -> Bool { + switch lhs { + case let .Tail(lhsCount): + switch rhs { + case let .Tail(rhsCount) where lhsCount == rhsCount: + return true + default: + return false + } + case let .Around(lhsId, lhsAnchorIndex, lhsScrollPosition): + switch rhs { + case let .Around(rhsId, rhsAnchorIndex, rhsScrollPosition) where lhsId == rhsId && lhsAnchorIndex == rhsAnchorIndex && lhsScrollPosition == rhsScrollPosition: + return true + default: + return false + } + } +} + +private enum ChatListControllerEntryId: Hashable { + case Search + case PeerId(Int64) + + var hashValue: Int { + switch self { + case .Search: + return 0 + case let .PeerId(peerId): + return peerId.hashValue + } + } +} + +private func <(lhs: ChatListControllerEntryId, rhs: ChatListControllerEntryId) -> Bool { + return lhs.hashValue < rhs.hashValue +} + +private func ==(lhs: ChatListControllerEntryId, rhs: ChatListControllerEntryId) -> Bool { + switch lhs { + case .Search: + switch rhs { + case .Search: + return true + default: + return false + } + case let .PeerId(lhsId): + switch rhs { + case let .PeerId(rhsId): + return lhsId == rhsId + default: + return false + } + } +} + +private enum ChatListControllerEntry: Comparable, Identifiable { + case SearchEntry + case MessageEntry(Message, Int) + case HoleEntry(ChatListHole) + case Nothing(MessageIndex) + + var index: MessageIndex { + switch self { + case .SearchEntry: + return MessageIndex.absoluteUpperBound() + case let .MessageEntry(message, _): + return MessageIndex(message) + case let .HoleEntry(hole): + return hole.index + case let .Nothing(index): + return index + } + } + + var stableId: ChatListControllerEntryId { + switch self { + case .SearchEntry: + return .Search + default: + return .PeerId(self.index.id.peerId.toInt64()) + } + } +} + +private func <(lhs: ChatListControllerEntry, rhs: ChatListControllerEntry) -> Bool { + return lhs.index < rhs.index +} + +private func ==(lhs: 
ChatListControllerEntry, rhs: ChatListControllerEntry) -> Bool { + switch lhs { + case .SearchEntry: + switch rhs { + case .SearchEntry: + return true + default: + return false + } + case let .MessageEntry(lhsMessage, lhsUnreadCount): + switch rhs { + case let .MessageEntry(rhsMessage, rhsUnreadCount): + return lhsMessage.id == rhsMessage.id && lhsMessage.flags == rhsMessage.flags && lhsUnreadCount == rhsUnreadCount + default: + break + } + case let .HoleEntry(lhsHole): + switch rhs { + case let .HoleEntry(rhsHole): + return lhsHole == rhsHole + default: + return false + } + case let .Nothing(lhsIndex): + switch rhs { + case let .Nothing(rhsIndex): + return lhsIndex == rhsIndex + default: + return false + } + } + return false +} + +extension ChatListEntry: Identifiable { + public var stableId: Int64 { + return self.index.id.peerId.toInt64() + } +} + +public class ChatListController: ViewController { + let account: Account + + private var chatListViewAndEntries: (ChatListView, [ChatListControllerEntry])? + + var chatListPosition: ChatListMessageViewPosition? + let chatListDisposable: MetaDisposable = MetaDisposable() + + let messageViewQueue = Queue() + let messageViewTransactionQueue = ListViewTransactionQueue() + var settingView = false + + let openMessageFromSearchDisposable: MetaDisposable = MetaDisposable() + + var chatListDisplayNode: ChatListControllerNode { + get { + return super.displayNode as! 
ChatListControllerNode + } + } + + public init(account: Account) { + self.account = account + + super.init() + + self.title = "Chats" + self.tabBarItem.title = "Chats" + self.tabBarItem.image = UIImage(bundleImageName: "Chat List/Tabs/IconChats") + self.tabBarItem.selectedImage = UIImage(bundleImageName: "Chat List/Tabs/IconChatsSelected") + + self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Edit", style: .plain, target: self, action: #selector(self.editPressed)) + //self.navigationItem.rightBarButtonItem = UIBarButtonItem(barButtonSystemItem: .Compose, target: self, action: Selector("composePressed")) + + self.scrollToTop = { [weak self] in + if let strongSelf = self { + if let (view, _) = strongSelf.chatListViewAndEntries, view.laterIndex == nil { + strongSelf.chatListDisplayNode.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous], scrollToItem: ListViewScrollToItem(index: 0, position: .Top, animated: true, curve: .Default, directionHint: .Up), updateSizeAndInsets: nil, stationaryItemRange: nil, completion: { _ in }) + } else { + strongSelf.setMessageViewPosition(.Around(index: MessageIndex.absoluteUpperBound(), anchorIndex: MessageIndex.absoluteUpperBound(), scrollPosition: .Top), hint: "later", force: true) + } + } + } + + self.setMessageViewPosition(.Tail(count: 50), hint: "initial", force: false) + } + + required public init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.chatListDisposable.dispose() + self.openMessageFromSearchDisposable.dispose() + } + + override public func loadDisplayNode() { + self.displayNode = ChatListControllerNode(account: self.account) + + self.chatListDisplayNode.listView.displayedItemRangeChanged = { [weak self] range in + if let strongSelf = self, !strongSelf.settingView { + if let range = range.loadedRange, let (view, _) = strongSelf.chatListViewAndEntries { + if range.firstIndex < 5 
&& view.laterIndex != nil { + strongSelf.setMessageViewPosition(.Around(index: view.entries[view.entries.count - 1].index, anchorIndex: MessageIndex.absoluteUpperBound(), scrollPosition: nil), hint: "later", force: false) + } else if range.firstIndex >= 5 && range.lastIndex >= view.entries.count - 5 && view.earlierIndex != nil { + strongSelf.setMessageViewPosition(.Around(index: view.entries[0].index, anchorIndex: MessageIndex.absoluteUpperBound(), scrollPosition: nil), hint: "earlier", force: false) + } + } + } + } + + self.chatListDisplayNode.navigationBar = self.navigationBar + + self.chatListDisplayNode.requestDeactivateSearch = { [weak self] in + self?.deactivateSearch() + } + + self.chatListDisplayNode.requestOpenMessageFromSearch = { [weak self] peer, messageId in + if let strongSelf = self { + let storedPeer = strongSelf.account.postbox.modify { modifier -> Void in + if modifier.getPeer(peer.id) == nil { + modifier.updatePeers([peer], update: { previousPeer, updatedPeer in + return updatedPeer + }) + } + } + strongSelf.openMessageFromSearchDisposable.set((storedPeer |> deliverOnMainQueue).start(completed: { [weak strongSelf] in + if let strongSelf = strongSelf { + (strongSelf.navigationController as? NavigationController)?.pushViewController(ChatController(account: strongSelf.account, peerId: messageId.peerId, messageId: messageId)) + } + })) + } + } + + self.chatListDisplayNode.requestOpenPeerFromSearch = { [weak self] peerId in + if let strongSelf = self { + (strongSelf.navigationController as? NavigationController)?.pushViewController(ChatController(account: strongSelf.account, peerId: peerId)) + } + } + + self.displayNodeDidLoad() + } + + private func setMessageViewPosition(_ position: ChatListMessageViewPosition, hint: String, force: Bool) { + if self.chatListPosition == nil || self.chatListPosition! 
!= position || force { + let signal: Signal<(ChatListView, ViewUpdateType), NoError> + self.chatListPosition = position + var scrollPosition: (MessageIndex, ListViewScrollPosition, ListViewScrollToItemDirectionHint)? + switch position { + case let .Tail(count): + signal = self.account.postbox.tailChatListView(count) + case let .Around(index, _, position): + trace("request around \(index.id.id) \(hint)") + signal = self.account.postbox.aroundChatListView(index, count: 80) + if let position = position { + var directionHint: ListViewScrollToItemDirectionHint = .Up + if let visibleItemRange = self.chatListDisplayNode.listView.displayedItemRange.loadedRange, let (_, entries) = self.chatListViewAndEntries { + if visibleItemRange.firstIndex >= 0 && visibleItemRange.firstIndex < entries.count { + if entries[visibleItemRange.firstIndex].index < index { + directionHint = .Up + } else { + directionHint = .Down + } + } + } + scrollPosition = (index, position, directionHint) + } + } + + var firstTime = true + chatListDisposable.set(( + signal |> deliverOnMainQueue + ).start(next: {[weak self] (view, updateType) in + if let strongSelf = self { + let animated: Bool + switch updateType { + case .Generic: + animated = !firstTime + case .FillHole: + animated = false + case .InitialUnread: + animated = false + case .UpdateVisible: + animated = false + } + + strongSelf.setPeerView(view, firstTime: strongSelf.chatListViewAndEntries == nil, scrollPosition: firstTime ?scrollPosition : nil, animated: animated) + firstTime = false + } + })) + } + } + + override public func viewWillAppear(_ animated: Bool) { + super.viewWillAppear(animated) + } + + override public func viewDidDisappear(_ animated: Bool) { + super.viewDidDisappear(animated) + } + + private func chatListControllerEntries(_ view: ChatListView) -> [ChatListControllerEntry] { + var result: [ChatListControllerEntry] = [] + for entry in view.entries { + switch entry { + case let .MessageEntry(message, unreadCount): + 
result.append(.MessageEntry(message, unreadCount)) + case let .HoleEntry(hole): + result.append(.HoleEntry(hole)) + case let .Nothing(index): + result.append(.Nothing(index)) + } + } + if view.laterIndex == nil { + result.append(.SearchEntry) + } + return result + } + + private func setPeerView(_ view: ChatListView, firstTime: Bool, scrollPosition: (MessageIndex, ListViewScrollPosition, ListViewScrollToItemDirectionHint)?, animated: Bool) { + self.messageViewTransactionQueue.addTransaction { [weak self] completed in + if let strongSelf = self { + strongSelf.settingView = true + let currentEntries = strongSelf.chatListViewAndEntries?.1 ?? [] + let viewEntries = strongSelf.chatListControllerEntries(view) + + strongSelf.messageViewQueue.async { + //let (deleteIndices, indicesAndItems, updateIndices) = mergeListsStableWithUpdates(leftList: currentEntries, rightList: viewEntries) + let (deleteIndices, indicesAndItems) = mergeListsStable(leftList: currentEntries, rightList: viewEntries) + let updateIndices: [(Int, ChatListControllerEntry)] = [] + + Queue.mainQueue().async { + var adjustedDeleteIndices: [ListViewDeleteItem] = [] + let previousCount = currentEntries.count + if deleteIndices.count != 0 { + for index in deleteIndices { + adjustedDeleteIndices.append(ListViewDeleteItem(index: previousCount - 1 - index, directionHint: nil)) + } + } + + let updatedCount = viewEntries.count + + var maxAnimatedInsertionIndex = -1 + if animated { + for (index, _, _) in indicesAndItems.sorted(by: { $0.0 > $1.0 }) { + let adjustedIndex = updatedCount - 1 - index + if adjustedIndex == maxAnimatedInsertionIndex + 1 { + maxAnimatedInsertionIndex += 1 + } + } + } + + var adjustedIndicesAndItems: [ListViewInsertItem] = [] + for (index, entry, previousIndex) in indicesAndItems { + let adjustedIndex = updatedCount - 1 - index + + var adjustedPreviousIndex: Int? 
+ if let previousIndex = previousIndex { + adjustedPreviousIndex = previousCount - 1 - previousIndex + } + + var directionHint: ListViewItemOperationDirectionHint? + if maxAnimatedInsertionIndex >= 0 && adjustedIndex <= maxAnimatedInsertionIndex { + directionHint = .Down + } + + switch entry { + case .SearchEntry: + adjustedIndicesAndItems.append(ListViewInsertItem(index: updatedCount - 1 - index, previousIndex: adjustedPreviousIndex, item: ChatListSearchItem(placeholder: "Search for messages or users", activate: { [weak self] in + self?.activateSearch() + }), directionHint: directionHint)) + case let .MessageEntry(message, unreadCount): + adjustedIndicesAndItems.append(ListViewInsertItem(index: adjustedIndex, previousIndex: adjustedPreviousIndex, item: ChatListItem(account: strongSelf.account, message: message, unreadCount: unreadCount, action: { [weak self] message in + if let strongSelf = self { + strongSelf.entrySelected(entry) + strongSelf.chatListDisplayNode.listView.clearHighlightAnimated(true) + } + }), directionHint: directionHint)) + case .HoleEntry: + adjustedIndicesAndItems.append(ListViewInsertItem(index: updatedCount - 1 - index, previousIndex: adjustedPreviousIndex, item: ChatListHoleItem(), directionHint: directionHint)) + case .Nothing: + adjustedIndicesAndItems.append(ListViewInsertItem(index: updatedCount - 1 - index, previousIndex: adjustedPreviousIndex, item: ChatListEmptyItem(), directionHint: directionHint)) + } + } + + var adjustedUpdateItems: [ListViewUpdateItem] = [] + for (index, entry) in updateIndices { + let adjustedIndex = updatedCount - 1 - index + + let directionHint: ListViewItemOperationDirectionHint? 
= nil + + switch entry { + case .SearchEntry: + adjustedUpdateItems.append(ListViewUpdateItem(index: updatedCount - 1 - index, item: ChatListSearchItem(placeholder: "Search for messages or users", activate: { [weak self] in + self?.activateSearch() + }), directionHint: directionHint)) + case let .MessageEntry(message, unreadCount): + adjustedUpdateItems.append(ListViewUpdateItem(index: adjustedIndex, item: ChatListItem(account: strongSelf.account, message: message, unreadCount: unreadCount, action: { [weak self] message in + if let strongSelf = self { + strongSelf.entrySelected(entry) + strongSelf.chatListDisplayNode.listView.clearHighlightAnimated(true) + } + }), directionHint: directionHint)) + case .HoleEntry: + adjustedUpdateItems.append(ListViewUpdateItem(index: updatedCount - 1 - index, item: ChatListHoleItem(), directionHint: directionHint)) + case .Nothing: + adjustedUpdateItems.append(ListViewUpdateItem(index: updatedCount - 1 - index, item: ChatListEmptyItem(), directionHint: directionHint)) + } + } + + if !adjustedDeleteIndices.isEmpty || !adjustedIndicesAndItems.isEmpty || !adjustedUpdateItems.isEmpty || scrollPosition != nil { + var options: ListViewDeleteAndInsertOptions = [] + if firstTime { + } else { + let _ = options.insert(.AnimateAlpha) + + if animated { + let _ = options.insert(.AnimateInsertion) + } + } + + var scrollToItem: ListViewScrollToItem? 
+ if let (itemIndex, itemPosition, directionHint) = scrollPosition { + var index = viewEntries.count - 1 + for entry in viewEntries { + if entry.index >= itemIndex { + scrollToItem = ListViewScrollToItem(index: index, position: itemPosition, animated: true, curve: .Default, directionHint: directionHint) + break + } + index -= 1 + } + + if scrollToItem == nil { + var index = 0 + for entry in viewEntries.reversed() { + if entry.index < itemIndex { + scrollToItem = ListViewScrollToItem(index: index, position: itemPosition, animated: true, curve: .Default, directionHint: directionHint) + break + } + index += 1 + } + } + } + + strongSelf.chatListDisplayNode.listView.deleteAndInsertItems(deleteIndices: adjustedDeleteIndices, insertIndicesAndItems: adjustedIndicesAndItems, updateIndicesAndItems: adjustedUpdateItems, options: options, scrollToItem: scrollToItem, completion: { [weak self] _ in + if let strongSelf = self { + strongSelf.ready.set(single(true, NoError.self)) + strongSelf.settingView = false + completed() + } + }) + } else { + strongSelf.ready.set(single(true, NoError.self)) + strongSelf.settingView = false + completed() + } + + strongSelf.chatListViewAndEntries = (view, viewEntries) + } + } + } else { + completed() + } + } + } + + private func entrySelected(_ entry: ChatListControllerEntry) { + if case let .MessageEntry(message, _) = entry { + (self.navigationController as? 
NavigationController)?.pushViewController(ChatController(account: self.account, peerId: message.id.peerId)) + } + } + + override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.chatListDisplayNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } + + @objc func editPressed() { + + } + + private func activateSearch() { + if self.displayNavigationBar { + if let scrollToTop = self.scrollToTop { + scrollToTop() + } + self.chatListDisplayNode.activateSearch() + self.setDisplayNavigationBar(false, transition: .animated(duration: 0.5, curve: .spring)) + } + } + + private func deactivateSearch() { + if !self.displayNavigationBar { + self.chatListDisplayNode.deactivateSearch() + self.setDisplayNavigationBar(true, transition: .animated(duration: 0.5, curve: .spring)) + } + } +} + diff --git a/TelegramUI/ChatListControllerNode.swift b/TelegramUI/ChatListControllerNode.swift new file mode 100644 index 0000000000..99537fdfa3 --- /dev/null +++ b/TelegramUI/ChatListControllerNode.swift @@ -0,0 +1,125 @@ +import Foundation +import AsyncDisplayKit +import Display +import Postbox +import TelegramCore + +class ChatListControllerNode: ASDisplayNode { + private let account: Account + + let listView: ListView + var navigationBar: NavigationBar? + + private var searchDisplayController: SearchDisplayController? + + private var containerLayout: (ContainerViewLayout, CGFloat)? + + var requestDeactivateSearch: (() -> Void)? + var requestOpenPeerFromSearch: ((PeerId) -> Void)? + var requestOpenMessageFromSearch: ((Peer, MessageId) -> Void)? 
+ + init(account: Account) { + self.account = account + self.listView = ListView() + + super.init(viewBlock: { + return UITracingLayerView() + }, didLoad: nil) + + self.addSubnode(self.listView) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.containerLayout = (layout, navigationBarHeight) + + var insets = layout.insets(options: [.input]) + insets.top += max(navigationBarHeight, layout.insets(options: [.statusBar]).top) + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: layout.size.width, height: layout.size.height) + self.listView.position = CGPoint(x: layout.size.width / 2.0, y: layout.size.height / 2.0) + + var duration: Double = 0.0 + var curve: UInt = 0 + switch transition { + case .immediate: + break + case let .animated(animationDuration, animationCurve): + duration = animationDuration + switch animationCurve { + case .easeInOut: + break + case .spring: + curve = 7 + } + } + + let listViewCurve: ListViewAnimationCurve + var speedFactor: CGFloat = 1.0 + if curve == 7 { + speedFactor = CGFloat(duration) / 0.5 + listViewCurve = .Spring(speed: CGFloat(speedFactor)) + } else { + listViewCurve = .Default + } + + let updateSizeAndInsets = ListViewUpdateSizeAndInsets(size: layout.size, insets: insets, duration: duration, curve: listViewCurve) + + self.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous, .LowLatency], scrollToItem: nil, updateSizeAndInsets: updateSizeAndInsets, stationaryItemRange: nil, completion: { _ in }) + + if let searchDisplayController = self.searchDisplayController { + searchDisplayController.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + } + } + + func activateSearch() { + guard let (containerLayout, navigationBarHeight) = self.containerLayout, let navigationBar = self.navigationBar else { + return + } + + var 
maybePlaceholderNode: SearchBarPlaceholderNode? + self.listView.forEachItemNode { node in + if let node = node as? ChatListSearchItemNode { + maybePlaceholderNode = node.searchBarNode + } + } + + if let _ = self.searchDisplayController { + return + } + + if let placeholderNode = maybePlaceholderNode { + self.searchDisplayController = SearchDisplayController(contentNode: ChatListSearchContainerNode(account: self.account, openPeer: { [weak self] peerId in + if let requestOpenPeerFromSearch = self?.requestOpenPeerFromSearch { + requestOpenPeerFromSearch(peerId) + } + }, openMessage: { [weak self] peer, messageId in + if let requestOpenMessageFromSearch = self?.requestOpenMessageFromSearch { + requestOpenMessageFromSearch(peer, messageId) + } + }), cancel: { [weak self] in + if let requestDeactivateSearch = self?.requestDeactivateSearch { + requestDeactivateSearch() + } + }) + + self.searchDisplayController?.containerLayoutUpdated(containerLayout, navigationBarHeight: navigationBarHeight, transition: .immediate) + self.searchDisplayController?.activate(insertSubnode: { subnode in + self.insertSubnode(subnode, belowSubnode: navigationBar) + }, placeholder: placeholderNode) + } + } + + func deactivateSearch() { + if let searchDisplayController = self.searchDisplayController { + var maybePlaceholderNode: SearchBarPlaceholderNode? + self.listView.forEachItemNode { node in + if let node = node as? 
ChatListSearchItemNode { + maybePlaceholderNode = node.searchBarNode + } + } + + searchDisplayController.deactivate(placeholder: maybePlaceholderNode) + self.searchDisplayController = nil + } + } +} diff --git a/TelegramUI/ChatListEmptyItem.swift b/TelegramUI/ChatListEmptyItem.swift new file mode 100644 index 0000000000..47c1afe52e --- /dev/null +++ b/TelegramUI/ChatListEmptyItem.swift @@ -0,0 +1,47 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Postbox +import Display + +class ChatListEmptyItem: ListViewItem { + let selectable: Bool = false + + init() { + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ChatListEmptyItemNode() + node.layoutForWidth(width, item: self, previousItem: previousItem, nextItem: nextItem) + node.updateItemPosition(first: previousItem == nil, last: nextItem == nil) + completion(node, {}) + } + } +} + +private let separatorHeight = 1.0 / UIScreen.main.scale + +class ChatListEmptyItemNode: ListViewItemNode { + let separatorNode: ASDisplayNode + + required init() { + self.separatorNode = ASDisplayNode() + self.separatorNode.backgroundColor = UIColor(0xc8c7cc) + self.separatorNode.isLayerBacked = true + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.separatorNode) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + self.separatorNode.frame = CGRect(origin: CGPoint(x: 0.0, y: 68.0 - separatorHeight), size: CGSize(width: width, height: separatorHeight)) + + self.contentSize = CGSize(width: width, height: 68.0) + } + + func updateItemPosition(first: Bool, last: Bool) { + self.insets = UIEdgeInsets(top: first ? 
4.0 : 0.0, left: 0.0, bottom: 0.0, right: 0.0) + } +} diff --git a/TelegramUI/ChatListHoleItem.swift b/TelegramUI/ChatListHoleItem.swift new file mode 100644 index 0000000000..5ad19e50fa --- /dev/null +++ b/TelegramUI/ChatListHoleItem.swift @@ -0,0 +1,105 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Postbox +import Display +import SwiftSignalKit + +private let titleFont = Font.regular(17.0) + +class ChatListHoleItem: ListViewItem { + let selectable: Bool = false + + init() { + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ChatListHoleItemNode() + node.relativePosition = (first: previousItem == nil, last: nextItem == nil) + node.insets = ChatListItemNode.insets(first: node.relativePosition.first, last: node.relativePosition.last) + node.layoutForWidth(width, item: self, previousItem: previousItem, nextItem: nextItem) + completion(node, {}) + } + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? 
ChatListHoleItemNode { + Queue.mainQueue().async { + let layout = node.asyncLayout() + async { + let first = previousItem == nil + let last = nextItem == nil + + let (nodeLayout, apply) = layout(width, first, last) + Queue.mainQueue().async { + completion(nodeLayout, { [weak node] in + apply() + node?.updateBackgroundAndSeparatorsLayout() + }) + } + } + } + } + } +} + +private let separatorHeight = 1.0 / UIScreen.main.scale + +class ChatListHoleItemNode: ListViewItemNode { + let separatorNode: ASDisplayNode + let labelNode: TextNode + + var relativePosition: (first: Bool, last: Bool) = (false, false) + + required init() { + self.separatorNode = ASDisplayNode() + self.separatorNode.backgroundColor = UIColor(0xc8c7cc) + self.separatorNode.isLayerBacked = true + + self.labelNode = TextNode() + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.separatorNode) + self.addSubnode(self.labelNode) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) 
{ + let layout = self.asyncLayout() + let (_, apply) = layout(width, self.relativePosition.first, self.relativePosition.last) + apply() + } + + func asyncLayout() -> (_ width: CGFloat, _ first: Bool, _ last: Bool) -> (ListViewItemNodeLayout, () -> Void) { + let labelNodeLayout = TextNode.asyncLayout(self.labelNode) + + return { width, first, last in + let (labelLayout, labelApply) = labelNodeLayout(NSAttributedString(string: "Loading", font: titleFont, textColor: UIColor(0xc8c7cc)), nil, 1, .end, CGSize(width: width, height: CGFloat.greatestFiniteMagnitude), nil) + + let insets = ChatListItemNode.insets(first: first, last: last) + let layout = ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 68.0), insets: insets) + + return (layout, { [weak self] in + if let strongSelf = self { + strongSelf.relativePosition = (first, last) + + let _ = labelApply() + + strongSelf.labelNode.frame = CGRect(origin: CGPoint(x: floor((width - labelLayout.size.width) / 2.0), y: floor((layout.contentSize.height - labelLayout.size.height) / 2.0)), size: labelLayout.size) + + strongSelf.separatorNode.frame = CGRect(origin: CGPoint(x: 80.0, y: 68.0 - separatorHeight), size: CGSize(width: width - 78.0, height: separatorHeight)) + + strongSelf.contentSize = layout.contentSize + strongSelf.insets = layout.insets + strongSelf.updateBackgroundAndSeparatorsLayout() + } + }) + } + } + + func updateBackgroundAndSeparatorsLayout() { + //let size = self.bounds.size + //let insets = self.insets + } +} diff --git a/TelegramUI/ChatListItem.swift b/TelegramUI/ChatListItem.swift new file mode 100644 index 0000000000..69f4fc46a5 --- /dev/null +++ b/TelegramUI/ChatListItem.swift @@ -0,0 +1,409 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Postbox +import Display +import SwiftSignalKit +import TelegramCore + +class ChatListItem: ListViewItem { + let account: Account + let message: Message + let unreadCount: Int + let action: (Message) -> Void + + let selectable: Bool = 
true + + init(account: Account, message: Message, unreadCount: Int, action: @escaping (Message) -> Void) { + self.account = account + self.message = message + self.unreadCount = unreadCount + self.action = action + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ChatListItemNode() + node.setupItem(account: self.account, message: self.message, unreadCount: self.unreadCount) + node.relativePosition = (first: previousItem == nil, last: nextItem == nil) + node.insets = ChatListItemNode.insets(first: node.relativePosition.first, last: node.relativePosition.last) + node.layoutForWidth(width, item: self, previousItem: previousItem, nextItem: nextItem) + completion(node, {}) + } + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? ChatListItemNode { + Queue.mainQueue().async { + node.setupItem(account: self.account, message: self.message, unreadCount: self.unreadCount) + let layout = node.asyncLayout() + async { + let first = previousItem == nil + let last = nextItem == nil + + let (nodeLayout, apply) = layout(self.account, width, first, last) + Queue.mainQueue().async { + completion(nodeLayout, { [weak node] in + apply() + node?.updateBackgroundAndSeparatorsLayout() + }) + } + } + } + } + } + + func selected() { + self.action(self.message) + } +} + +private let titleFont = Font.medium(17.0) +private let textFont = Font.regular(15.0) +private let dateFont = Font.regular(floorToScreenPixels(14.0)) +private let badgeFont = Font.regular(14.0) + +private func generateStatusCheckImage(single: Bool) -> UIImage? 
{ + return generateImage(CGSize(width: single ? 13.0 : 18.0, height: 13.0), contextGenerator: { size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + + context.translateBy(x: size.width / 2.0, y: size.height / 2.0) + context.scaleBy(x: 1.0, y: -1.0) + context.translateBy(x: -size.width / 2.0 + 1.0, y: -size.height / 2.0 + 1.0) + + //CGContextSetFillColorWithColor(context, UIColor.lightGrayColor().CGColor) + //CGContextFillRect(context, CGRect(origin: CGPoint(), size: size)) + + context.scaleBy(x: 0.5, y: 0.5) + context.setStrokeColor(UIColor(0x19C700).cgColor) + context.setLineWidth(2.8) + if single { + let _ = try? drawSvgPath(context, path: "M0,12 L6.75230742,19.080349 L22.4821014,0.277229071 ") + } else { + let _ = try? drawSvgPath(context, path: "M0,12 L6.75230742,19.080349 L22.4821014,0.277229071 ") + let _ = try? drawSvgPath(context, path: "M13.4492402,16.500967 L15.7523074,18.8031199 L31.4821014,0 ") + } + context.strokePath() + }) +} + +private func generateBadgeBackgroundImage() -> UIImage? { + return generateImage(CGSize(width: 20.0, height: 20.0), contextGenerator: { size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(0x1195f2).cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: size)) + })?.stretchableImage(withLeftCapWidth: 10, topCapHeight: 10) +} + +private let statusSingleCheckImage = generateStatusCheckImage(single: true) +private let statusDoubleCheckImage = generateStatusCheckImage(single: false) +private let badgeBackgroundImage = generateBadgeBackgroundImage() + +private let separatorHeight = 1.0 / UIScreen.main.scale + +class ChatListItemNode: ListViewItemNode { + var account: Account? + var message: Message? 
+ var unreadCount: Int = 0 + + private let highlightedBackgroundNode: ASDisplayNode + + let avatarNode: ChatListAvatarNode + let contentNode: ASDisplayNode + let titleNode: TextNode + let textNode: TextNode + let dateNode: TextNode + let statusNode: ASImageNode + let separatorNode: ASDisplayNode + let badgeBackgroundNode: ASImageNode + let badgeTextNode: TextNode + + var relativePosition: (first: Bool, last: Bool) = (false, false) + + required init() { + self.avatarNode = ChatListAvatarNode(font: Font.regular(24.0)) + self.avatarNode.isLayerBacked = true + + self.highlightedBackgroundNode = ASDisplayNode() + self.highlightedBackgroundNode.backgroundColor = UIColor(0xd9d9d9) + self.highlightedBackgroundNode.isLayerBacked = true + + self.contentNode = ASDisplayNode() + self.contentNode.isLayerBacked = true + self.contentNode.displaysAsynchronously = true + self.contentNode.shouldRasterizeDescendants = true + self.contentNode.isOpaque = true + self.contentNode.backgroundColor = UIColor.white + self.contentNode.contentMode = .left + self.contentNode.contentsScale = UIScreenScale + + self.titleNode = TextNode() + self.titleNode.isLayerBacked = true + self.titleNode.displaysAsynchronously = true + + self.textNode = TextNode() + self.textNode.isLayerBacked = true + self.textNode.displaysAsynchronously = true + + self.dateNode = TextNode() + self.dateNode.isLayerBacked = true + self.dateNode.displaysAsynchronously = true + + self.statusNode = ASImageNode() + self.statusNode.isLayerBacked = true + self.statusNode.displaysAsynchronously = false + self.statusNode.displayWithoutProcessing = true + + self.badgeBackgroundNode = ASImageNode() + self.badgeBackgroundNode.isLayerBacked = true + self.badgeBackgroundNode.displaysAsynchronously = false + self.badgeBackgroundNode.displayWithoutProcessing = true + + self.badgeTextNode = TextNode() + self.badgeTextNode.isLayerBacked = true + self.badgeTextNode.displaysAsynchronously = true + + self.separatorNode = ASDisplayNode() + 
self.separatorNode.backgroundColor = UIColor(0xc8c7cc) + self.separatorNode.isLayerBacked = true + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.separatorNode) + self.addSubnode(self.avatarNode) + self.addSubnode(self.contentNode) + + self.contentNode.addSubnode(self.titleNode) + self.contentNode.addSubnode(self.textNode) + self.contentNode.addSubnode(self.dateNode) + self.contentNode.addSubnode(self.statusNode) + self.contentNode.addSubnode(self.badgeBackgroundNode) + self.contentNode.addSubnode(self.badgeTextNode) + } + + func setupItem(account: Account, message: Message, unreadCount: Int) { + self.account = account + self.message = message + self.unreadCount = unreadCount + + let peer = message.peers[message.id.peerId] + if let peer = peer { + self.avatarNode.setPeer(account: account, peer: peer) + } + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + let layout = self.asyncLayout() + let (_, apply) = layout(self.account, width, self.relativePosition.first, self.relativePosition.last) + apply() + } + + func updateBackgroundAndSeparatorsLayout() { + let size = self.bounds.size + let insets = self.insets + + self.highlightedBackgroundNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -insets.top - separatorHeight), size: CGSize(width: size.width, height: size.height + separatorHeight)) + } + + class func insets(first: Bool, last: Bool) -> UIEdgeInsets { + return UIEdgeInsets(top: first ? 
4.0 : 0.0, left: 0.0, bottom: 0.0, right: 0.0) + } + + override func setHighlighted(_ highlighted: Bool, animated: Bool) { + super.setHighlighted(highlighted, animated: animated) + + if highlighted { + self.contentNode.displaysAsynchronously = false + self.contentNode.backgroundColor = UIColor.clear + self.contentNode.isOpaque = false + + self.highlightedBackgroundNode.alpha = 1.0 + if self.highlightedBackgroundNode.supernode == nil { + self.insertSubnode(self.highlightedBackgroundNode, aboveSubnode: self.separatorNode) + } + } else { + if self.highlightedBackgroundNode.supernode != nil { + if animated { + self.highlightedBackgroundNode.layer.animateAlpha(from: self.highlightedBackgroundNode.alpha, to: 0.0, duration: 0.4, completion: { [weak self] completed in + if let strongSelf = self { + if completed { + strongSelf.highlightedBackgroundNode.removeFromSupernode() + strongSelf.contentNode.backgroundColor = UIColor.white + strongSelf.contentNode.isOpaque = true + strongSelf.contentNode.displaysAsynchronously = true + } + } + }) + self.highlightedBackgroundNode.alpha = 0.0 + } else { + self.highlightedBackgroundNode.removeFromSupernode() + self.contentNode.backgroundColor = UIColor.white + self.contentNode.isOpaque = true + self.contentNode.displaysAsynchronously = true + } + } + } + } + + func asyncLayout() -> (_ account: Account?, _ width: CGFloat, _ first: Bool, _ last: Bool) -> (ListViewItemNodeLayout, () -> Void) { + let dateLayout = TextNode.asyncLayout(self.dateNode) + let textLayout = TextNode.asyncLayout(self.textNode) + let titleLayout = TextNode.asyncLayout(self.titleNode) + let badgeTextLayout = TextNode.asyncLayout(self.badgeTextNode) + + let message = self.message + let unreadCount = self.unreadCount + + return { account, width, first, last in + var textAttributedString: NSAttributedString? + var dateAttributedString: NSAttributedString? + var titleAttributedString: NSAttributedString? + var badgeAttributedString: NSAttributedString? 
+ + var statusImage: UIImage? + var currentBadgeBackgroundImage: UIImage? + + if let message = message { + let peer = message.peers[message.id.peerId] + + var messageText: NSString = message.text as NSString + if message.text.isEmpty { + for media in message.media { + switch media { + case _ as TelegramMediaImage: + messageText = "Photo" + case let fileMedia as TelegramMediaFile: + if fileMedia.isSticker { + messageText = "Sticker" + } else { + messageText = "File" + } + case _ as TelegramMediaMap: + messageText = "Map" + case _ as TelegramMediaContact: + messageText = "Contact" + default: + break + } + } + } + + let attributedText: NSAttributedString + if let author = message.author as? TelegramUser, let peer = peer, peer as? TelegramUser == nil { + let peerText: NSString = (author.id == account?.peerId ? "You: " : author.compactDisplayTitle + ": ") as NSString + + let mutableAttributedText = NSMutableAttributedString(string: peerText.appending(messageText as String), attributes: [kCTFontAttributeName as String: textFont]) + mutableAttributedText.addAttribute(kCTForegroundColorAttributeName as String, value: UIColor.black.cgColor, range: NSMakeRange(0, peerText.length)) + mutableAttributedText.addAttribute(kCTForegroundColorAttributeName as String, value: UIColor(0x8e8e93).cgColor, range: NSMakeRange(peerText.length, messageText.length)) + attributedText = mutableAttributedText; + } else { + attributedText = NSAttributedString(string: messageText as String, font: textFont, textColor: UIColor(0x8e8e93)) + } + + if let displayTitle = peer?.displayTitle { + titleAttributedString = NSAttributedString(string: displayTitle, font: titleFont, textColor: UIColor.black) + } + + textAttributedString = attributedText + + var t = Int(message.timestamp) + var timeinfo = tm() + localtime_r(&t, &timeinfo) + + let dateText = String(format: "%02d:%02d", arguments: [Int(timeinfo.tm_hour), Int(timeinfo.tm_min)]) + + dateAttributedString = NSAttributedString(string: dateText, font: 
dateFont, textColor: UIColor(0x8e8e93)) + + if message.author?.id == account?.peerId { + if !message.flags.contains(.Unsent) && !message.flags.contains(.Failed) { + statusImage = statusDoubleCheckImage + } + } + + if unreadCount != 0 { + currentBadgeBackgroundImage = badgeBackgroundImage + badgeAttributedString = NSAttributedString(string: "\(unreadCount)", font: badgeFont, textColor: UIColor.white) + } + } + + let statusWidth = statusImage?.size.width ?? 0.0 + + let contentRect = CGRect(origin: CGPoint(x: 2.0, y: 12.0), size: CGSize(width: width - 78.0 - 10.0 - 1.0, height: 68.0 - 12.0 - 9.0)) + + let (dateLayout, dateApply) = dateLayout(dateAttributedString, nil, 1, .end, CGSize(width: contentRect.width, height: CGFloat.greatestFiniteMagnitude), nil) + + let (badgeLayout, badgeApply) = badgeTextLayout(badgeAttributedString, nil, 1, .end, CGSize(width: 50.0, height: CGFloat.greatestFiniteMagnitude), nil) + + let badgeSize: CGFloat + if let currentBadgeBackgroundImage = currentBadgeBackgroundImage { + badgeSize = max(currentBadgeBackgroundImage.size.width, badgeLayout.size.width + 10.0) + 2.0 + } else { + badgeSize = 0.0 + } + + let (textLayout, textApply) = textLayout(textAttributedString, nil, 1, .end, CGSize(width: contentRect.width - badgeSize, height: CGFloat.greatestFiniteMagnitude), nil) + + let titleRect = CGRect(origin: contentRect.origin, size: CGSize(width: contentRect.width - dateLayout.size.width - 10.0 - statusWidth, height: contentRect.height)) + let (titleLayout, titleApply) = titleLayout(titleAttributedString, nil, 1, .end, CGSize(width: titleRect.width, height: CGFloat.greatestFiniteMagnitude), nil) + + let insets = ChatListItemNode.insets(first: first, last: last) + let layout = ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 68.0), insets: insets) + + return (layout, { [weak self] in + if let strongSelf = self { + strongSelf.relativePosition = (first, last) + + strongSelf.avatarNode.frame = CGRect(origin: CGPoint(x: 10.0, y: 
4.0), size: CGSize(width: 60.0, height: 60.0)) + strongSelf.contentNode.frame = CGRect(origin: CGPoint(x: 78.0, y: 0.0), size: CGSize(width: width - 78.0, height: 60.0)) + + let _ = dateApply() + let _ = textApply() + let _ = titleApply() + let _ = badgeApply() + + strongSelf.dateNode.frame = CGRect(origin: CGPoint(x: contentRect.size.width - dateLayout.size.width, y: contentRect.origin.y + 2.0), size: dateLayout.size) + + if let statusImage = statusImage { + strongSelf.statusNode.image = statusImage + strongSelf.statusNode.isHidden = false + let statusSize = statusImage.size + strongSelf.statusNode.frame = CGRect(origin: CGPoint(x: contentRect.size.width - dateLayout.size.width - 2.0 - statusSize.width, y: contentRect.origin.y + 5.0), size: statusSize) + } else { + strongSelf.statusNode.image = nil + strongSelf.statusNode.isHidden = true + } + + if let currentBadgeBackgroundImage = currentBadgeBackgroundImage { + strongSelf.badgeBackgroundNode.image = currentBadgeBackgroundImage + strongSelf.badgeBackgroundNode.isHidden = false + + let badgeBackgroundWidth = max(badgeLayout.size.width + 10.0, currentBadgeBackgroundImage.size.width) + let badgeBackgroundFrame = CGRect(x: contentRect.maxX - badgeBackgroundWidth, y: contentRect.maxY - currentBadgeBackgroundImage.size.height - 2.0, width: badgeBackgroundWidth, height: currentBadgeBackgroundImage.size.height) + let badgeTextFrame = CGRect(origin: CGPoint(x: badgeBackgroundFrame.midX - badgeLayout.size.width / 2.0, y: badgeBackgroundFrame.minY + 1.0), size: badgeLayout.size) + + strongSelf.badgeTextNode.frame = badgeTextFrame + strongSelf.badgeBackgroundNode.frame = badgeBackgroundFrame + } else { + strongSelf.badgeBackgroundNode.image = nil + strongSelf.badgeBackgroundNode.isHidden = true + } + + strongSelf.titleNode.frame = CGRect(origin: CGPoint(x: contentRect.origin.x, y: contentRect.origin.y), size: titleLayout.size) + + strongSelf.textNode.frame = CGRect(origin: CGPoint(x: contentRect.origin.x, y: contentRect.maxY 
- textLayout.size.height - 1.0), size: textLayout.size) + + strongSelf.separatorNode.frame = CGRect(origin: CGPoint(x: 78.0 + contentRect.origin.x, y: 68.0 - separatorHeight), size: CGSize(width: width - 78.0, height: separatorHeight)) + + strongSelf.contentSize = layout.contentSize + strongSelf.insets = layout.insets + strongSelf.updateBackgroundAndSeparatorsLayout() + } + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + self.contentNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: duration * 0.5) + self.avatarNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: duration * 0.5) + } +} diff --git a/TelegramUI/ChatListSearchContainerNode.swift b/TelegramUI/ChatListSearchContainerNode.swift new file mode 100644 index 0000000000..86a15ab596 --- /dev/null +++ b/TelegramUI/ChatListSearchContainerNode.swift @@ -0,0 +1,126 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +private enum ChatListSearchEntry { + case message(Message) +} + +final class ChatListSearchContainerNode: SearchDisplayControllerContentNode { + private let account: Account + private let openMessage: (Peer, MessageId) -> Void + + private let recentPeersNode: ChatListSearchRecentPeersNode + private let listNode: ListView + + private let searchQuery = Promise() + private let searchDisposable = MetaDisposable() + + init(account: Account, openPeer: @escaping (PeerId) -> Void, openMessage: @escaping (Peer, MessageId) -> Void) { + self.account = account + self.openMessage = openMessage + + self.recentPeersNode = ChatListSearchRecentPeersNode(account: account, peerSelected: openPeer) + self.listNode = ListView() + + super.init() + + self.backgroundColor = UIColor.white + self.addSubnode(self.recentPeersNode) + self.addSubnode(self.listNode) + + self.listNode.isHidden = true + + let searchItems = searchQuery.get() + |> mapToSignal { query -> Signal<[ChatListSearchEntry], NoError> in + 
if let query = query, !query.isEmpty { + return searchMessages(account: account, query: query) + |> delay(0.2, queue: Queue.concurrentDefaultQueue()) + |> map { messages -> [ChatListSearchEntry] in + return messages.map({ .message($0) }) + } + } else { + return .single([]) + } + } + + let previousSearchItems = Atomic<[ChatListSearchEntry]>(value: []) + + self.searchDisposable.set((searchItems + |> deliverOnMainQueue).start(next: { [weak self] items in + if let strongSelf = self { + let previousItems = previousSearchItems.swap(items) + + var listItems: [ListViewItem] = [] + for item in items { + switch item { + case let .message(message): + listItems.append(ChatListItem(account: account, message: message, unreadCount: 0, action: { [weak strongSelf] _ in + if let strongSelf = strongSelf, let peer = message.peers[message.id.peerId] { + strongSelf.listNode.clearHighlightAnimated(true) + strongSelf.openMessage(peer, message.id) + } + })) + } + } + + strongSelf.listNode.deleteAndInsertItems(deleteIndices: (0 ..< previousItems.count).map({ ListViewDeleteItem(index: $0, directionHint: nil) }), insertIndicesAndItems: (0 ..< listItems.count).map({ ListViewInsertItem(index: $0, previousIndex: nil, item: listItems[$0], directionHint: .Down) }), updateIndicesAndItems: [], options: []) + } + })) + } + + deinit { + self.searchDisposable.dispose() + } + + override func searchTextUpdated(text: String) { + if text.isEmpty { + self.searchQuery.set(.single(nil)) + self.recentPeersNode.isHidden = false + self.listNode.isHidden = true + } else { + self.searchQuery.set(.single(text)) + self.recentPeersNode.isHidden = true + self.listNode.isHidden = false + } + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + + let recentPeersSize = self.recentPeersNode.measure(CGSize(width: 
layout.size.width, height: CGFloat.infinity)) + self.recentPeersNode.frame = CGRect(origin: CGPoint(x: 0.0, y: navigationBarHeight), size: recentPeersSize) + self.recentPeersNode.layout() + + var duration: Double = 0.0 + var curve: UInt = 0 + switch transition { + case .immediate: + break + case let .animated(animationDuration, animationCurve): + duration = animationDuration + switch animationCurve { + case .easeInOut: + break + case .spring: + curve = 7 + } + } + + + let listViewCurve: ListViewAnimationCurve + var speedFactor: CGFloat = 1.0 + if curve == 7 { + speedFactor = CGFloat(duration) / 0.5 + listViewCurve = .Spring(speed: CGFloat(speedFactor)) + } else { + listViewCurve = .Default + } + + self.listNode.frame = CGRect(origin: CGPoint(), size: layout.size) + self.listNode.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous], scrollToItem: nil, updateSizeAndInsets: ListViewUpdateSizeAndInsets(size: layout.size, insets: UIEdgeInsets(top: navigationBarHeight, left: 0.0, bottom: layout.insets(options: [.input]).bottom, right: 0.0), duration: duration, curve: listViewCurve), stationaryItemRange: nil, completion: { _ in }) + } +} diff --git a/TelegramUI/ChatListSearchItem.swift b/TelegramUI/ChatListSearchItem.swift new file mode 100644 index 0000000000..55a1543374 --- /dev/null +++ b/TelegramUI/ChatListSearchItem.swift @@ -0,0 +1,101 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Postbox +import Display +import SwiftSignalKit + +private let searchBarFont = Font.regular(15.0) + +class ChatListSearchItem: ListViewItem { + let selectable: Bool = false + + private let placeholder: String + private let activate: () -> Void + + init(placeholder: String, activate: @escaping () -> Void) { + self.placeholder = placeholder + self.activate = activate + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: 
ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ChatListSearchItemNode() + node.placeholder = self.placeholder + + let makeLayout = node.asyncLayout() + let (layout, apply) = makeLayout(width) + + node.contentSize = layout.contentSize + node.insets = layout.insets + + node.activate = self.activate + completion(node, { + apply() + }) + } + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? ChatListSearchItemNode { + Queue.mainQueue().async { + let layout = node.asyncLayout() + async { + let (nodeLayout, apply) = layout(width) + Queue.mainQueue().async { + completion(nodeLayout, { + apply() + }) + } + } + } + } + } +} + +class ChatListSearchItemNode: ListViewItemNode { + let searchBarNode: SearchBarPlaceholderNode + var placeholder: String? + + fileprivate var activate: (() -> Void)? { + didSet { + self.searchBarNode.activate = self.activate + } + } + + required init() { + self.searchBarNode = SearchBarPlaceholderNode() + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.searchBarNode) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + let makeLayout = self.asyncLayout() + let (layout, apply) = makeLayout(width) + apply() + self.contentSize = layout.contentSize + self.insets = layout.insets + } + + func asyncLayout() -> (_ width: CGFloat) -> (ListViewItemNodeLayout, () -> Void) { + let searchBarNodeLayout = self.searchBarNode.asyncLayout() + let placeholder = self.placeholder + + return { width in + let searchBarApply = searchBarNodeLayout(NSAttributedString(string: placeholder ?? 
"Search", font: searchBarFont, textColor: UIColor(0x8e8e93)), CGSize(width: width - 16.0, height: CGFloat.greatestFiniteMagnitude)) + + let layout = ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 44.0), insets: UIEdgeInsets()) + + return (layout, { [weak self] in + if let strongSelf = self { + strongSelf.searchBarNode.frame = CGRect(origin: CGPoint(x: 8.0, y: 8.0), size: CGSize(width: width - 16.0, height: 28.0)) + searchBarApply() + + strongSelf.searchBarNode.bounds = CGRect(origin: CGPoint(), size: CGSize(width: width - 16.0, height: 28.0)) + } + }) + } + } +} diff --git a/TelegramUI/ChatListSearchRecentPeersNode.swift b/TelegramUI/ChatListSearchRecentPeersNode.swift new file mode 100644 index 0000000000..8234c5865a --- /dev/null +++ b/TelegramUI/ChatListSearchRecentPeersNode.swift @@ -0,0 +1,57 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +final class ChatListSearchRecentPeersNode: ASDisplayNode { + private let sectionHeaderNode: ListSectionHeaderNode + private let listView: ListView + + private let disposable = MetaDisposable() + + init(account: Account, peerSelected: @escaping (PeerId) -> Void) { + self.sectionHeaderNode = ListSectionHeaderNode() + self.sectionHeaderNode.title = "PEOPLE" + + self.listView = ListView() + self.listView.transform = CATransform3DMakeRotation(-CGFloat(M_PI / 2.0), 0.0, 0.0, 1.0) + + super.init() + + self.addSubnode(self.sectionHeaderNode) + self.addSubnode(self.listView) + + self.disposable.set((recentPeers(account: account) |> take(1) |> deliverOnMainQueue).start(next: { [weak self] peers in + if let strongSelf = self { + var items: [ListViewItem] = [] + for peer in peers { + items.append(HorizontalPeerItem(account: account, peer: peer, action: peerSelected)) + } + strongSelf.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: (0 ..< items.count).map({ ListViewInsertItem(index: $0, previousIndex: nil, item: 
items[$0], directionHint: .Down) }), updateIndicesAndItems: [], options: []) + } + })) + } + + deinit { + disposable.dispose() + } + + override func calculateSizeThatFits(_ constrainedSize: CGSize) -> CGSize { + return CGSize(width: constrainedSize.width, height: 120.0) + } + + override func layout() { + super.layout() + + let bounds = self.bounds + + self.sectionHeaderNode.frame = CGRect(origin: CGPoint(), size: CGSize(width: self.bounds.size.width, height: 29.0)) + self.sectionHeaderNode.layout() + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: 92.0, height: bounds.size.width) + self.listView.position = CGPoint(x: bounds.size.width / 2.0, y: 92.0 / 2.0 + 29.0) + self.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous], scrollToItem: nil, updateSizeAndInsets: ListViewUpdateSizeAndInsets(size: CGSize(width: 92.0, height: bounds.size.width), insets: UIEdgeInsets(), duration: 0.0, curve: .Default), stationaryItemRange: nil, completion: { _ in }) + } +} diff --git a/TelegramUI/ChatMediaActionSheetController.swift b/TelegramUI/ChatMediaActionSheetController.swift new file mode 100644 index 0000000000..3cf65d35ae --- /dev/null +++ b/TelegramUI/ChatMediaActionSheetController.swift @@ -0,0 +1,50 @@ +import Foundation +import Display +import AsyncDisplayKit +import UIKit +import SwiftSignalKit + +final class ChatMediaActionSheetController: ActionSheetController { + private let _ready = Promise() + override var ready: Promise { + return self._ready + } + private var didSetReady = false + + var location: () -> Void = { } + var contacts: () -> Void = { } + + override init() { + super.init() + + self._ready.set(.single(true)) + + self.setItemGroups([ + ActionSheetItemGroup(items: [ + ChatMediaActionSheetRollItem(), + ActionSheetButtonItem(title: "File", action: {}), + ActionSheetButtonItem(title: "Location", action: { [weak self] in + self?.dismissAnimated() + if let location = self?.location { 
+ location() + } + }), + ActionSheetButtonItem(title: "Contact", action: { [weak self] in + self?.dismissAnimated() + if let contacts = self?.contacts { + contacts() + } + }) + ]), + ActionSheetItemGroup(items: [ + ActionSheetButtonItem(title: "Cancel", action: { [weak self] in + self?.dismissAnimated() + }), + ]) + ]) + } + + required init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } +} diff --git a/TelegramUI/ChatMediaActionSheetRollItem.swift b/TelegramUI/ChatMediaActionSheetRollItem.swift new file mode 100644 index 0000000000..76c3817b47 --- /dev/null +++ b/TelegramUI/ChatMediaActionSheetRollItem.swift @@ -0,0 +1,104 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Display +import Photos +import SwiftSignalKit + +final class ChatMediaActionSheetRollItem: ActionSheetItem { + func node() -> ActionSheetItemNode { + return ChatMediaActionSheetRollItemNode() + } +} + +private final class ChatMediaActionSheetRollItemNode: ActionSheetItemNode, PHPhotoLibraryChangeObserver { + private let listView: ListView + private let label: UILabel + private let button: HighlightTrackingButton + + private var assetCollection: PHAssetCollection? + private var fetchResult: PHFetchResult? 
+ + override init() { + self.listView = ListView() + self.listView.transform = CATransform3DMakeRotation(-CGFloat(M_PI / 2.0), 0.0, 0.0, 1.0) + + self.label = UILabel() + self.label.backgroundColor = nil + self.label.isOpaque = false + self.label.textColor = UIColor(0x1195f2) + self.label.text = "Photo or Video" + self.label.font = Font.regular(20.0) + self.label.sizeToFit() + + self.button = HighlightTrackingButton() + + super.init() + + self.button.highligthedChanged = { [weak self] highlighted in + if let strongSelf = self { + if highlighted { + strongSelf.backgroundNode.backgroundColor = ActionSheetItemNode.highlightedBackgroundColor + } else { + UIView.animate(withDuration: 0.3, animations: { + strongSelf.backgroundNode.backgroundColor = ActionSheetItemNode.defaultBackgroundColor + }) + } + } + } + self.view.addSubview(self.button) + + self.view.addSubview(self.label) + self.addSubnode(self.listView) + + PHPhotoLibrary.requestAuthorization({ _ in + + }) + + let allPhotosOptions = PHFetchOptions() + allPhotosOptions.sortDescriptors = [NSSortDescriptor(key: "creationDate", ascending: false)] + + self.fetchResult = PHAsset.fetchAssets(with: .image, options: allPhotosOptions) + + var items: [ListViewItem] = [] + if let fetchResult = self.fetchResult { + for i in 0 ..< fetchResult.count { + let asset = fetchResult.object(at: i) + items.append(ActionSheetRollImageItem(asset: asset)) + } + } + + if !items.isEmpty { + self.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: (0 ..< items.count).map({ ListViewInsertItem(index: $0, previousIndex: nil, item: items[$0], directionHint: .Down) }), updateIndicesAndItems: [], options: []) + } + + //PHPhotoLibrary.shared().register(self) + } + + override func calculateSizeThatFits(_ constrainedSize: CGSize) -> CGSize { + return CGSize(width: constrainedSize.width, height: 157.0) + } + + override func layout() { + super.layout() + + let bounds = self.bounds + + self.button.frame = CGRect(origin: CGPoint(), 
size: bounds.size) + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: 84.0, height: bounds.size.width) + self.listView.position = CGPoint(x: bounds.size.width / 2.0, y: 84.0 / 2.0 + 8.0) + self.listView.updateSizeAndInsets(size: CGSize(width: 84.0, height: bounds.size.width), insets: UIEdgeInsets(top: 4.0, left: 0.0, bottom: 4.0, right: 0.0), duration: 0.0, options: UIViewAnimationOptions(rawValue: UInt(0))) + + let labelSize = self.label.bounds.size + self.label.frame = CGRect(origin: CGPoint(x: floorToScreenPixels((bounds.size.width - labelSize.width) / 2.0), y: 84.0 + 16.0 + floorToScreenPixels((bounds.height - 84.0 - 16.0 - labelSize.height) / 2.0)), size: labelSize) + } + + func photoLibraryDidChange(_ changeInstance: PHChange) { + Queue.concurrentDefaultQueue().async { + //let collectionChanges = changeInstance.changeDetailsForFetchResult(self.fetchResult) + //self.fetchResult = collectionChanges.fetchResultAfterChanges() + + } + } +} diff --git a/TelegramUI/ChatMessageActionItemNode.swift b/TelegramUI/ChatMessageActionItemNode.swift new file mode 100644 index 0000000000..e1dbbfcb39 --- /dev/null +++ b/TelegramUI/ChatMessageActionItemNode.swift @@ -0,0 +1,133 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +private func backgroundImage(color: UIColor) -> UIImage? 
{ + return generateImage(CGSize(width: 20.0, height: 20.0), contextGenerator: { size, context -> Void in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(0x748391, 0.45).cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: size)) + })?.stretchableImage(withLeftCapWidth: 8, topCapHeight: 8) +} + +private let titleFont = UIFont.systemFont(ofSize: 13.0) + +class ChatMessageActionItemNode: ChatMessageItemView { + let labelNode: TextNode + let backgroundNode: ASImageNode + + private let fetchDisposable = MetaDisposable() + + required init() { + self.labelNode = TextNode() + self.labelNode.isLayerBacked = true + self.labelNode.displaysAsynchronously = true + + self.backgroundNode = ASImageNode() + self.backgroundNode.isLayerBacked = true + self.backgroundNode.displayWithoutProcessing = true + self.backgroundNode.displaysAsynchronously = false + + super.init(layerBacked: false) + + self.backgroundNode.image = backgroundImage(color: UIColor.blue) + self.addSubnode(self.backgroundNode) + self.addSubnode(self.labelNode) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.fetchDisposable.dispose() + } + + override func setupItem(_ item: ChatMessageItem) { + super.setupItem(item) + } + + override func asyncLayout() -> (_ item: ChatMessageItem, _ width: CGFloat, _ mergedTop: Bool, _ mergedBottom: Bool) -> (ListViewItemNodeLayout, (ListViewItemUpdateAnimation) -> Void) { + let labelLayout = TextNode.asyncLayout(self.labelNode) + + return { item, width, mergedTop, mergedBottom in + var attributedString: NSAttributedString? + + for media in item.message.media { + if let action = media as? TelegramMediaAction { + let authorName = item.message.author?.displayTitle ?? 
"" + switch action.action { + case .groupCreated: + attributedString = NSAttributedString(string: tr(.ChatServiceGroupCreated), font: titleFont, textColor: UIColor.white) + case let .addedMembers(peerIds): + if peerIds.first == item.message.author?.id { + attributedString = NSAttributedString(string: tr(.ChatServiceGroupAddedSelf(authorName)), font: titleFont, textColor: UIColor.white) + } else { + attributedString = NSAttributedString(string: tr(.ChatServiceGroupAddedMembers(authorName, peerDisplayTitles(peerIds, item.message.peers))), font: titleFont, textColor: UIColor.white) + } + case let .removedMembers(peerIds): + if peerIds.first == item.message.author?.id { + attributedString = NSAttributedString(string: tr(.ChatServiceGroupRemovedSelf(authorName)), font: titleFont, textColor: UIColor.white) + } else { + attributedString = NSAttributedString(string: tr(.ChatServiceGroupRemovedMembers(authorName, peerDisplayTitles(peerIds, item.message.peers))), font: titleFont, textColor: UIColor.white) + } + case let .photoUpdated(image): + if let _ = image { + attributedString = NSAttributedString(string: tr(.ChatServiceGroupUpdatedPhoto(authorName)), font: titleFont, textColor: UIColor.white) + } else { + attributedString = NSAttributedString(string: tr(.ChatServiceGroupRemovedPhoto(authorName)), font: titleFont, textColor: UIColor.white) + } + case let .titleUpdated(title): + attributedString = NSAttributedString(string: tr(.ChatServiceGroupUpdatedTitle(authorName, title)), font: titleFont, textColor: UIColor.white) + case .pinnedMessageUpdated: + var replyMessageText = "" + for attribute in item.message.attributes { + if let attribute = attribute as? 
ReplyMessageAttribute, let message = item.message.associatedMessages[attribute.messageId] { + replyMessageText = message.text + } + } + attributedString = NSAttributedString(string: tr(.ChatServiceGroupUpdatedPinnedMessage(authorName, replyMessageText)), font: titleFont, textColor: UIColor.white) + case .joinedByLink: + attributedString = NSAttributedString(string: tr(.ChatServiceGroupJoinedByLink(authorName)), font: titleFont, textColor: UIColor.white) + case .channelMigratedFromGroup, .groupMigratedToChannel: + attributedString = NSAttributedString(string: tr(.ChatServiceGroupMigratedToSupergroup), font: titleFont, textColor: UIColor.white) + default: + attributedString = nil + } + + break + } + } + + let (size, apply) = labelLayout(attributedString, nil, 1, .end, CGSize(width: width, height: CGFloat.greatestFiniteMagnitude), nil) + + let backgroundSize = CGSize(width: size.size.width + 8.0 + 8.0, height: 20.0) + + return (ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 20.0), insets: UIEdgeInsets(top: 4.0, left: 0.0, bottom: 4.0, right: 0.0)), { [weak self] animation in + if let strongSelf = self { + let _ = apply() + + strongSelf.backgroundNode.frame = CGRect(origin: CGPoint(x: floorToScreenPixels((width - backgroundSize.width) / 2.0), y: 0.0), size: backgroundSize) + strongSelf.labelNode.frame = CGRect(origin: CGPoint(x: strongSelf.backgroundNode.frame.origin.x + 8.0, y: floorToScreenPixels((backgroundSize.height - size.size.height) / 2.0) - 1.0), size: size.size) + } + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + super.animateInsertion(currentTimestamp, duration: duration) + + self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + self.labelNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + super.animateAdded(currentTimestamp, duration: duration) + + 
self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + self.labelNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } +} diff --git a/TelegramUI/ChatMessageAvatarAccessoryItem.swift b/TelegramUI/ChatMessageAvatarAccessoryItem.swift new file mode 100644 index 0000000000..3ae4cdd0e4 --- /dev/null +++ b/TelegramUI/ChatMessageAvatarAccessoryItem.swift @@ -0,0 +1,54 @@ +import Foundation +import Postbox +import Display +import TelegramCore + +final class ChatMessageAvatarAccessoryItem: ListViewAccessoryItem { + private let account: Account + private let peerId: PeerId + private let peer: Peer? + private let messageTimestamp: Int32 + + init(account: Account, peerId: PeerId, peer: Peer?, messageTimestamp: Int32) { + self.account = account + self.peerId = peerId + self.peer = peer + self.messageTimestamp = messageTimestamp + } + + func isEqualToItem(_ other: ListViewAccessoryItem) -> Bool { + if case let other as ChatMessageAvatarAccessoryItem = other { + return other.peerId == self.peerId && abs(other.messageTimestamp - self.messageTimestamp) < 5 * 60 + } + + return false + } + + func node() -> ListViewAccessoryItemNode { + let node = ChatMessageAvatarAccessoryItemNode() + node.frame = CGRect(origin: CGPoint(), size: CGSize(width: 38.0, height: 38.0)) + if let peer = self.peer { + node.setPeer(account: account, peer: peer) + } + return node + } +} + +final class ChatMessageAvatarAccessoryItemNode: ListViewAccessoryItemNode { + let avatarNode: ChatListAvatarNode + + override init() { + self.avatarNode = ChatListAvatarNode(font: Font.regular(14.0)) + self.avatarNode.isLayerBacked = true + self.avatarNode.frame = CGRect(origin: CGPoint(), size: CGSize(width: 38.0, height: 38.0)) + + super.init() + + self.isLayerBacked = true + self.addSubnode(self.avatarNode) + } + + func setPeer(account: Account, peer: Peer) { + self.avatarNode.setPeer(account: account, peer: peer) + } +} diff --git 
a/TelegramUI/ChatMessageBubbleContentCalclulateImageCorners.swift b/TelegramUI/ChatMessageBubbleContentCalclulateImageCorners.swift new file mode 100644 index 0000000000..b2c8106798 --- /dev/null +++ b/TelegramUI/ChatMessageBubbleContentCalclulateImageCorners.swift @@ -0,0 +1,52 @@ + +func chatMessageBubbleImageContentCorners(relativeContentPosition position: ChatMessageBubbleContentPosition, normalRadius: CGFloat, mergedRadius: CGFloat, mergedWithAnotherContentRadius: CGFloat) -> ImageCorners { + let topLeftCorner: ImageCorner + let topRightCorner: ImageCorner + + switch position.top { + case .Neighbour: + topLeftCorner = .Corner(mergedWithAnotherContentRadius) + topRightCorner = .Corner(mergedWithAnotherContentRadius) + case let .None(mergeStatus): + switch mergeStatus { + case .Left: + topLeftCorner = .Corner(mergedRadius) + topRightCorner = .Corner(normalRadius) + case .None: + topLeftCorner = .Corner(normalRadius) + topRightCorner = .Corner(normalRadius) + case .Right: + topLeftCorner = .Corner(normalRadius) + topRightCorner = .Corner(mergedRadius) + } + } + + let bottomLeftCorner: ImageCorner + let bottomRightCorner: ImageCorner + + switch position.bottom { + case .Neighbour: + bottomLeftCorner = .Corner(mergedWithAnotherContentRadius) + bottomRightCorner = .Corner(mergedWithAnotherContentRadius) + case let .None(mergeStatus): + switch mergeStatus { + case .Left: + bottomLeftCorner = .Corner(mergedRadius) + bottomRightCorner = .Corner(normalRadius) + case let .None(status): + switch status { + case .Incoming: + bottomLeftCorner = .Tail(normalRadius) + bottomRightCorner = .Corner(normalRadius) + case .Outgoing: + bottomLeftCorner = .Corner(normalRadius) + bottomRightCorner = .Tail(normalRadius) + } + case .Right: + bottomLeftCorner = .Corner(normalRadius) + bottomRightCorner = .Corner(mergedRadius) + } + } + + return ImageCorners(topLeft: topLeftCorner, topRight: topRightCorner, bottomLeft: bottomLeftCorner, bottomRight: bottomRightCorner) +} diff --git 
import Foundation
import AsyncDisplayKit
import Display
import Postbox

/// Static layout properties a bubble content node reports to its container.
struct ChatMessageBubbleContentProperties {
    // When true and the message carries no forward/reply/inline-bot header,
    // the simple author header is suppressed entirely.
    let hidesSimpleAuthorHeader: Bool
    // Extra vertical spacing inserted between the header block and this
    // content node when a header is present.
    let headerSpacing: CGFloat
}

// Message direction for an unmerged edge; used to place the bubble tail.
enum ChatMessageBubbleNoneMergeStatus {
    case Incoming
    case Outgoing
}

// How the bubble merges with an adjacent message of the same author:
// not merged (carrying the direction), or merged on the left/right side.
enum ChatMessageBubbleMergeStatus {
    case None(ChatMessageBubbleNoneMergeStatus)
    case Left
    case Right
}

// Relationship of a content node's edge to whatever is next to it:
// either the bubble boundary (.None, with merge info) or another
// content node inside the same bubble (.Neighbour).
enum ChatMessageBubbleRelativePosition {
    case None(ChatMessageBubbleMergeStatus)
    case Neighbour
}

/// Position of a content node relative to its vertical neighbours
/// within the bubble.
struct ChatMessageBubbleContentPosition {
    let top: ChatMessageBubbleRelativePosition
    let bottom: ChatMessageBubbleRelativePosition
}

/// Abstract base class for a single piece of bubble content (text, media,
/// file, webpage, ...). Subclasses must override `asyncLayoutContent()`;
/// the other methods are optional animation/media hooks.
class ChatMessageBubbleContentNode: ASDisplayNode {
    // Default properties; subclasses override to customize header behavior.
    var properties: ChatMessageBubbleContentProperties {
        return ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: false, headerSpacing: 0.0)
    }
    
    // Injected by the containing item node so content can dispatch taps etc.
    var controllerInteraction: ChatControllerInteraction?
    
    required override init() {
        //super.init(layerBacked: false)
        super.init()
    }
    
    /// Returns a curried layout function:
    /// (item, constants, position, constrainedSize) ->
    ///   (maxWidth, (constrainedSize) -> (width, (width) -> (size, apply))).
    /// Subclasses must override; the base implementation traps.
    func asyncLayoutContent() -> (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) {
        preconditionFailure()
    }
    
    // Called when the item is inserted with an animation. No-op by default.
    func animateInsertion(_ currentTimestamp: Double, duration: Double) {
    }
    
    // Called when the item is added without insertion animation. No-op by default.
    func animateAdded(_ currentTimestamp: Double, duration: Double) {
    }
    
    // Called when this content node joins an existing bubble. No-op by default.
    func animateInsertionIntoBubble(_ duration: Double) {
    }
    
    // Returns the node to use for a media transition animation, if this
    // content node displays the given media. Base implementation: none.
    func transitionNode(media: Media) -> ASDisplayNode? {
        return nil
    }
    
    // Informs the content node which media should currently be hidden
    // (e.g. while shown in a gallery). No-op by default.
    func updateHiddenMedia(_ media: [Media]?) {
    }
}
import Foundation
import AsyncDisplayKit
import Display
import Postbox
import TelegramCore

/// How a bubble background merges with the neighbouring messages of the same
/// author: not at all, at the top edge, at the bottom edge, or both.
enum ChatMessageBackgroundMergeType {
    case None, Top, Bottom, Both
    
    init(top: Bool, bottom: Bool) {
        if top && bottom {
            self = .Both
        } else if top {
            self = .Top
        } else if bottom {
            self = .Bottom
        } else {
            self = .None
        }
    }
}

// Direction plus merge shape; selects the background artwork.
private enum ChatMessageBackgroundType: Equatable {
    case Incoming(ChatMessageBackgroundMergeType), Outgoing(ChatMessageBackgroundMergeType)
}

private func ==(lhs: ChatMessageBackgroundType, rhs: ChatMessageBackgroundType) -> Bool {
    switch (lhs, rhs) {
        case let (.Incoming(lhsMergeType), .Incoming(rhsMergeType)):
            return lhsMergeType == rhsMergeType
        case let (.Outgoing(lhsMergeType), .Outgoing(rhsMergeType)):
            return lhsMergeType == rhsMergeType
        default:
            return false
    }
}

// Pre-composed bubble background images, loaded once per process.
private let chatMessageBackgroundIncomingImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleIncoming")?.precomposed()
private let chatMessageBackgroundOutgoingImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleOutgoing")?.precomposed()
private let chatMessageBackgroundIncomingMergedTopImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleIncomingMergedTop")?.precomposed()
private let chatMessageBackgroundIncomingMergedBottomImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleIncomingMergedBottom")?.precomposed()
private let chatMessageBackgroundIncomingMergedBothImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleIncomingMergedBoth")?.precomposed()
// NOTE(review): unlike the incoming set, all outgoing merged variants reference
// the single "BubbleOutgoingMerged" asset — presumably dedicated Top/Bottom/Both
// assets do not exist yet; confirm against the asset catalog before changing.
// (An unused `chatMessageBackgroundOutgoingMergedImage` constant referencing the
// same asset was removed.)
private let chatMessageBackgroundOutgoingMergedTopImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleOutgoingMerged")?.precomposed()
private let chatMessageBackgroundOutgoingMergedBottomImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleOutgoingMerged")?.precomposed()
private let chatMessageBackgroundOutgoingMergedBothImage = UIImage(bundleImageName: "Chat/Message/Background/BubbleOutgoingMerged")?.precomposed()

/// Image node that displays the bubble background artwork for a given
/// direction/merge combination, avoiding redundant image swaps.
class ChatMessageBackground: ASImageNode {
    private var type: ChatMessageBackgroundType?
    
    override init() {
        super.init()
        
        self.isLayerBacked = true
        self.displaysAsynchronously = false
        self.displayWithoutProcessing = true
    }
    
    /// Assigns the background image for `type`; no-op when unchanged.
    fileprivate func setType(type: ChatMessageBackgroundType) {
        if let currentType = self.type, currentType == type {
            return
        }
        self.type = type
        
        // NOTE(review): the .Top/.Bottom -> MergedBottom/MergedTop image swap
        // for incoming pairs with the inverted MergeType construction in
        // asyncLayout (`init(top: mergedBottom, bottom: mergedTop)`); the net
        // effect is that a message merged with the one above it shows the
        // "MergedTop" artwork. Preserved as-is — confirm before "fixing" either
        // side alone.
        let image: UIImage?
        switch type {
            case let .Incoming(mergeType):
                switch mergeType {
                    case .None:
                        image = chatMessageBackgroundIncomingImage
                    case .Top:
                        image = chatMessageBackgroundIncomingMergedBottomImage
                    case .Bottom:
                        image = chatMessageBackgroundIncomingMergedTopImage
                    case .Both:
                        image = chatMessageBackgroundIncomingMergedBothImage
                }
            case let .Outgoing(mergeType):
                switch mergeType {
                    case .None:
                        image = chatMessageBackgroundOutgoingImage
                    case .Top:
                        image = chatMessageBackgroundOutgoingMergedTopImage
                    case .Bottom:
                        image = chatMessageBackgroundOutgoingMergedBottomImage
                    case .Both:
                        image = chatMessageBackgroundOutgoingMergedBothImage
                }
        }
        self.image = image
    }
}

/// Determines, in display order, which content node classes are needed to
/// render the given message: media/file nodes first, then text, then a
/// webpage preview (only for loaded webpage content).
private func contentNodeClassesForItem(_ item: ChatMessageItem) -> [AnyClass] {
    var result: [AnyClass] = []
    for media in item.message.media {
        if let _ = media as? TelegramMediaImage {
            result.append(ChatMessageMediaBubbleContentNode.self)
        } else if let file = media as? TelegramMediaFile {
            if file.isVideo {
                result.append(ChatMessageMediaBubbleContentNode.self)
            } else {
                result.append(ChatMessageFileBubbleContentNode.self)
            }
        }
    }
    
    if !item.message.text.isEmpty {
        result.append(ChatMessageTextBubbleContentNode.self)
    }
    
    for media in item.message.media {
        if let webpage = media as? TelegramMediaWebpage {
            if case .Loaded = webpage.content {
                result.append(ChatMessageWebpageBubbleContentNode.self)
            }
            break
        }
    }
    
    return result
}

// Medium-weight 14pt font for author names; falls back to an explicit
// HelveticaNeue-Medium CTFont before iOS 8.2, where weighted system fonts
// are unavailable.
private let nameFont: UIFont = {
    if #available(iOS 8.2, *) {
        return UIFont.systemFont(ofSize: 14.0, weight: UIFontWeightMedium)
    } else {
        return CTFontCreateWithName("HelveticaNeue-Medium" as CFString, 14.0, nil)
    }
}()

private let inlineBotPrefixFont = Font.regular(14.0)
private let inlineBotNameFont = nameFont

// Deterministic per-peer name colors; indexed by peer id modulo the count.
private let chatMessagePeerIdColors: [UIColor] = [
    UIColor(0xfc5c51),
    UIColor(0xfa790f),
    UIColor(0x0fb297),
    UIColor(0x3ca5ec),
    UIColor(0x3d72ed),
    UIColor(0x895dd5)
]

/// List item node that renders one message as a bubble: background artwork,
/// optional header (author name / forward info / reply info) and a vertical
/// stack of content nodes produced by `contentNodeClassesForItem`.
class ChatMessageBubbleItemNode: ChatMessageItemView {
    private let backgroundNode: ChatMessageBackground
    // Temporary clipping container used while the bubble frame animates, so
    // content does not draw outside the intermediate background frames.
    private var transitionClippingNode: ASDisplayNode?
    
    private var nameNode: TextNode?
    private var forwardInfoNode: ChatMessageForwardInfoNode?
    private var replyInfoNode: ChatMessageReplyInfoNode?
    
    private var contentNodes: [ChatMessageBubbleContentNode] = []
    
    private var messageId: MessageId?
    
    // (from, to) background frames of an in-flight animated transition.
    private var backgroundFrameTransition: (CGRect, CGRect)?
    
    required init() {
        self.backgroundNode = ChatMessageBackground()
        
        super.init(layerBacked: false)
        
        self.addSubnode(self.backgroundNode)
    }
    
    required init?(coder aDecoder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }
    
    override func animateInsertion(_ currentTimestamp: Double, duration: Double) {
        super.animateInsertion(currentTimestamp, duration: duration)
        
        self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2)
        
        for contentNode in self.contentNodes {
            contentNode.animateInsertion(currentTimestamp, duration: duration)
        }
    }
    
    override func animateAdded(_ currentTimestamp: Double, duration: Double) {
        super.animateAdded(currentTimestamp, duration: duration)
        
        self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2)
        
        self.nameNode?.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2)
        self.forwardInfoNode?.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2)
        self.replyInfoNode?.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2)
        
        for contentNode in self.contentNodes {
            contentNode.animateAdded(currentTimestamp, duration: duration)
        }
    }
    
    override func didLoad() {
        super.didLoad()
        
        self.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.tapGesture(_:))))
    }
    
    /// Produces the layout function for this item. Captures the current
    /// content node classes/layouts and the header node layouts so the heavy
    /// measuring work can run off the main thread; the returned apply closure
    /// mutates the node tree on the main thread.
    override func asyncLayout() -> (_ item: ChatMessageItem, _ width: CGFloat, _ mergedTop: Bool, _ mergedBottom: Bool) -> (ListViewItemNodeLayout, (ListViewItemUpdateAnimation) -> Void) {
        var currentContentClassesPropertiesAndLayouts: [(AnyClass, ChatMessageBubbleContentProperties, (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))))] = []
        for contentNode in self.contentNodes {
            currentContentClassesPropertiesAndLayouts.append((type(of: contentNode) as AnyClass, contentNode.properties, contentNode.asyncLayoutContent()))
        }
        
        let authorNameLayout = TextNode.asyncLayout(self.nameNode)
        let forwardInfoLayout = ChatMessageForwardInfoNode.asyncLayout(self.forwardInfoNode)
        let replyInfoLayout = ChatMessageReplyInfoNode.asyncLayout(self.replyInfoNode)
        
        let layoutConstants = self.layoutConstants
        
        return { item, width, mergedTop, mergedBottom in
            let message = item.message
            
            let incoming = item.account.peerId != message.author?.id
            // The author header is shown only for the first message of a run
            // of incoming group messages.
            let displayAuthorInfo = !mergedTop && incoming && item.peerId.isGroup && item.message.author != nil
            
            // Group chats reserve horizontal space for the avatar accessory.
            let avatarInset: CGFloat = (item.peerId.isGroup && item.message.author != nil) ? layoutConstants.avatarDiameter : 0.0
            
            let tmpWidth = width * layoutConstants.bubble.maximumWidthFillFactor
            let maximumContentWidth = floor(tmpWidth - layoutConstants.bubble.edgeInset - layoutConstants.bubble.edgeInset - layoutConstants.bubble.contentInsets.left - layoutConstants.bubble.contentInsets.right - avatarInset)
            
            var contentPropertiesAndPrepareLayouts: [(ChatMessageBubbleContentProperties, (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))))] = []
            var addedContentNodes: [ChatMessageBubbleContentNode]?
            
            // Reuse existing content nodes of matching class; instantiate the rest.
            let contentNodeClasses = contentNodeClassesForItem(item)
            for contentNodeClass in contentNodeClasses {
                var found = false
                for (currentClass, currentProperties, currentLayout) in currentContentClassesPropertiesAndLayouts {
                    if currentClass == contentNodeClass {
                        contentPropertiesAndPrepareLayouts.append((currentProperties, currentLayout))
                        found = true
                        break
                    }
                }
                if !found {
                    let contentNode = (contentNodeClass as! ChatMessageBubbleContentNode.Type).init()
                    contentPropertiesAndPrepareLayouts.append((contentNode.properties, contentNode.asyncLayoutContent()))
                    if addedContentNodes == nil {
                        addedContentNodes = [contentNode]
                    } else {
                        addedContentNodes!.append(contentNode)
                    }
                }
            }
            
            var authorNameString: String?
            var inlineBotNameString: String?
            var replyMessage: Message?
            
            for attribute in message.attributes {
                if let attribute = attribute as? InlineBotMessageAttribute, let bot = message.peers[attribute.peerId] as? TelegramUser {
                    inlineBotNameString = bot.username
                } else if let attribute = attribute as? ReplyMessageAttribute {
                    replyMessage = message.associatedMessages[attribute.messageId]
                }
            }
            
            // A lone content node may opt out of the simple author header.
            var displayHeader = true
            if inlineBotNameString == nil && message.forwardInfo == nil && replyMessage == nil {
                if let first = contentPropertiesAndPrepareLayouts.first, first.0.hidesSimpleAuthorHeader {
                    displayHeader = false
                }
            }
            
            var contentPropertiesAndLayouts: [(ChatMessageBubbleContentProperties, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void)))] = []
            
            let topNodeMergeStatus: ChatMessageBubbleMergeStatus = mergedTop ? (incoming ? .Left : .Right) : .None(incoming ? .Incoming : .Outgoing)
            let bottomNodeMergeStatus: ChatMessageBubbleMergeStatus = mergedBottom ? (incoming ? .Left : .Right) : .None(incoming ? .Incoming : .Outgoing)
            
            let firstNodeTopPosition: ChatMessageBubbleRelativePosition
            if displayHeader {
                firstNodeTopPosition = .Neighbour
            } else {
                firstNodeTopPosition = .None(topNodeMergeStatus)
            }
            let lastNodeTopPosition: ChatMessageBubbleRelativePosition = .None(bottomNodeMergeStatus)
            
            // First pass: let each content node constrain the common width.
            var maximumNodeWidth = maximumContentWidth
            let contentNodeCount = contentPropertiesAndPrepareLayouts.count
            var index = 0
            for (properties, prepareLayout) in contentPropertiesAndPrepareLayouts {
                let topPosition: ChatMessageBubbleRelativePosition
                let bottomPosition: ChatMessageBubbleRelativePosition
                
                if index == 0 {
                    topPosition = firstNodeTopPosition
                } else {
                    topPosition = .Neighbour
                }
                
                if index == contentNodeCount - 1 {
                    bottomPosition = lastNodeTopPosition
                } else {
                    bottomPosition = .Neighbour
                }
                
                let (maxNodeWidth, nodeLayout) = prepareLayout(item, layoutConstants, ChatMessageBubbleContentPosition(top: topPosition, bottom: bottomPosition), CGSize(width: maximumContentWidth, height: CGFloat.greatestFiniteMagnitude))
                maximumNodeWidth = min(maximumNodeWidth, maxNodeWidth)
                
                contentPropertiesAndLayouts.append((properties, nodeLayout))
                index += 1
            }
            
            var headerSize = CGSize()
            
            var nameNodeOriginY: CGFloat = 0.0
            var nameNodeSizeApply: (CGSize, () -> TextNode?) = (CGSize(), { nil })
            var authorNameColor: UIColor?
            
            var replyInfoOriginY: CGFloat = 0.0
            var replyInfoSizeApply: (CGSize, () -> ChatMessageReplyInfoNode?) = (CGSize(), { nil })
            
            var forwardInfoOriginY: CGFloat = 0.0
            var forwardInfoSizeApply: (CGSize, () -> ChatMessageForwardInfoNode?) = (CGSize(), { nil })
            
            if displayHeader {
                if let author = message.author, displayAuthorInfo {
                    authorNameString = author.displayTitle
                    // Stable per-peer color (6 entries in chatMessagePeerIdColors).
                    authorNameColor = chatMessagePeerIdColors[Int(author.id.id % 6)]
                }
                
                if authorNameString != nil || inlineBotNameString != nil {
                    if headerSize.height < CGFloat(FLT_EPSILON) {
                        headerSize.height += 4.0
                    }
                    
                    let inlineBotNameColor = incoming ? UIColor(0x1195f2) : UIColor(0x00a700)
                    
                    // "Author via @bot", "Author", or "via @bot".
                    let attributedString: NSAttributedString
                    if let authorNameString = authorNameString, let authorNameColor = authorNameColor, let inlineBotNameString = inlineBotNameString {
                        let botPrefixString: NSString = " via "
                        let mutableString = NSMutableAttributedString(string: "\(authorNameString)\(botPrefixString)@\(inlineBotNameString)", attributes: [NSFontAttributeName: inlineBotNameFont, NSForegroundColorAttributeName: inlineBotNameColor])
                        mutableString.addAttributes([NSFontAttributeName: nameFont, NSForegroundColorAttributeName: authorNameColor], range: NSMakeRange(0, (authorNameString as NSString).length))
                        mutableString.addAttributes([NSFontAttributeName: inlineBotPrefixFont, NSForegroundColorAttributeName: inlineBotNameColor], range: NSMakeRange((authorNameString as NSString).length, botPrefixString.length))
                        attributedString = mutableString
                    } else if let authorNameString = authorNameString, let authorNameColor = authorNameColor {
                        attributedString = NSAttributedString(string: authorNameString, font: nameFont, textColor: authorNameColor)
                    } else if let inlineBotNameString = inlineBotNameString {
                        attributedString = NSAttributedString(string: "via @\(inlineBotNameString)", font: inlineBotNameFont, textColor: inlineBotNameColor)
                    } else {
                        attributedString = NSAttributedString(string: "", font: nameFont, textColor: UIColor.black)
                    }
                    
                    let sizeAndApply = authorNameLayout(attributedString, nil, 1, .end, CGSize(width: maximumNodeWidth, height: CGFloat.greatestFiniteMagnitude), nil)
                    nameNodeSizeApply = (sizeAndApply.0.size, {
                        return sizeAndApply.1()
                    })
                    nameNodeOriginY = headerSize.height
                    headerSize.width = max(headerSize.width, nameNodeSizeApply.0.width + layoutConstants.text.bubbleInsets.left + layoutConstants.text.bubbleInsets.right)
                    headerSize.height += nameNodeSizeApply.0.height
                }
                
                if let forwardInfo = message.forwardInfo {
                    if headerSize.height < CGFloat(FLT_EPSILON) {
                        headerSize.height += 4.0
                    }
                    let sizeAndApply = forwardInfoLayout(incoming, forwardInfo.source == nil ? forwardInfo.author : forwardInfo.source!, forwardInfo.source == nil ? nil : forwardInfo.author, CGSize(width: maximumNodeWidth, height: CGFloat.greatestFiniteMagnitude))
                    forwardInfoSizeApply = (sizeAndApply.0, { sizeAndApply.1() })
                    
                    forwardInfoOriginY = headerSize.height
                    headerSize.width = max(headerSize.width, forwardInfoSizeApply.0.width + layoutConstants.text.bubbleInsets.left + layoutConstants.text.bubbleInsets.right)
                    headerSize.height += forwardInfoSizeApply.0.height
                }
                
                if let replyMessage = replyMessage {
                    if headerSize.height < CGFloat(FLT_EPSILON) {
                        headerSize.height += 6.0
                    } else {
                        headerSize.height += 2.0
                    }
                    let sizeAndApply = replyInfoLayout(incoming, replyMessage, CGSize(width: maximumNodeWidth, height: CGFloat.greatestFiniteMagnitude))
                    replyInfoSizeApply = (sizeAndApply.0, { sizeAndApply.1() })
                    
                    replyInfoOriginY = headerSize.height
                    headerSize.width = max(headerSize.width, replyInfoSizeApply.0.width + layoutConstants.text.bubbleInsets.left + layoutConstants.text.bubbleInsets.right)
                    headerSize.height += replyInfoSizeApply.0.height + 2.0
                }
                
                if headerSize.height > CGFloat(FLT_EPSILON) {
                    headerSize.height -= 3.0
                }
            }
            
            // Existing content nodes whose class is no longer required.
            var removedContentNodeIndices: [Int]?
            findRemoved: for i in 0 ..< currentContentClassesPropertiesAndLayouts.count {
                let currentClass: AnyClass = currentContentClassesPropertiesAndLayouts[i].0
                for contentNodeClass in contentNodeClasses {
                    if currentClass == contentNodeClass {
                        continue findRemoved
                    }
                }
                if removedContentNodeIndices == nil {
                    removedContentNodeIndices = [i]
                } else {
                    removedContentNodeIndices!.append(i)
                }
            }
            
            var contentNodePropertiesAndFinalize: [(ChatMessageBubbleContentProperties, (CGFloat) -> (CGSize, () -> Void))] = []
            
            // Second pass: measure at the agreed width.
            var maxContentWidth: CGFloat = headerSize.width
            for (contentNodeProperties, contentNodeLayout) in contentPropertiesAndLayouts {
                let (contentNodeWidth, contentNodeFinalize) = contentNodeLayout(CGSize(width: maximumNodeWidth, height: CGFloat.greatestFiniteMagnitude))
                maxContentWidth = max(maxContentWidth, contentNodeWidth)
                
                contentNodePropertiesAndFinalize.append((contentNodeProperties, contentNodeFinalize))
            }
            
            // Third pass: finalize at the widest content width and stack heights.
            var contentSize = CGSize(width: maxContentWidth, height: 0.0)
            index = 0
            var contentNodeSizesPropertiesAndApply: [(CGSize, ChatMessageBubbleContentProperties, () -> Void)] = []
            for (properties, finalize) in contentNodePropertiesAndFinalize {
                let (size, apply) = finalize(maxContentWidth)
                contentNodeSizesPropertiesAndApply.append((size, properties, apply))
                
                contentSize.height += size.height
                
                if index == 0 && headerSize.height > CGFloat(FLT_EPSILON) {
                    contentSize.height += properties.headerSpacing
                }
                
                index += 1
            }
            
            let layoutBubbleSize = CGSize(width: max(contentSize.width, headerSize.width) + layoutConstants.bubble.contentInsets.left + layoutConstants.bubble.contentInsets.right, height: max(layoutConstants.bubble.minimumSize.height, headerSize.height + contentSize.height + layoutConstants.bubble.contentInsets.top + layoutConstants.bubble.contentInsets.bottom))
            
            // Incoming bubbles hug the left edge (after the avatar), outgoing the right.
            let backgroundFrame = CGRect(origin: CGPoint(x: incoming ? (layoutConstants.bubble.edgeInset + avatarInset) : (width - layoutBubbleSize.width - layoutConstants.bubble.edgeInset), y: 0.0), size: layoutBubbleSize)
            
            let contentOrigin = CGPoint(x: backgroundFrame.origin.x + (incoming ? layoutConstants.bubble.contentInsets.left : layoutConstants.bubble.contentInsets.right), y: backgroundFrame.origin.y + layoutConstants.bubble.contentInsets.top + headerSize.height)
            
            let layoutSize = CGSize(width: width, height: layoutBubbleSize.height)
            let layoutInsets = UIEdgeInsets(top: mergedTop ? layoutConstants.bubble.mergedSpacing : layoutConstants.bubble.defaultSpacing, left: 0.0, bottom: mergedBottom ? layoutConstants.bubble.mergedSpacing : layoutConstants.bubble.defaultSpacing, right: 0.0)
            
            let layout = ListViewItemNodeLayout(contentSize: layoutSize, insets: layoutInsets)
            
            return (layout, { [weak self] animation in
                if let strongSelf = self {
                    strongSelf.messageId = message.id
                    
                    if let nameNode = nameNodeSizeApply.1() {
                        strongSelf.nameNode = nameNode
                        if nameNode.supernode == nil {
                            if !nameNode.isNodeLoaded {
                                nameNode.isLayerBacked = true
                            }
                            strongSelf.addSubnode(nameNode)
                        }
                        nameNode.frame = CGRect(origin: CGPoint(x: contentOrigin.x + layoutConstants.text.bubbleInsets.left, y: layoutConstants.bubble.contentInsets.top + nameNodeOriginY), size: nameNodeSizeApply.0)
                    } else {
                        strongSelf.nameNode?.removeFromSupernode()
                        strongSelf.nameNode = nil
                    }
                    
                    if let forwardInfoNode = forwardInfoSizeApply.1() {
                        strongSelf.forwardInfoNode = forwardInfoNode
                        if forwardInfoNode.supernode == nil {
                            strongSelf.addSubnode(forwardInfoNode)
                        }
                        forwardInfoNode.frame = CGRect(origin: CGPoint(x: contentOrigin.x + layoutConstants.text.bubbleInsets.left, y: layoutConstants.bubble.contentInsets.top + forwardInfoOriginY), size: forwardInfoSizeApply.0)
                    } else {
                        strongSelf.forwardInfoNode?.removeFromSupernode()
                        strongSelf.forwardInfoNode = nil
                    }
                    
                    if let replyInfoNode = replyInfoSizeApply.1() {
                        strongSelf.replyInfoNode = replyInfoNode
                        if replyInfoNode.supernode == nil {
                            strongSelf.addSubnode(replyInfoNode)
                        }
                        replyInfoNode.frame = CGRect(origin: CGPoint(x: contentOrigin.x + layoutConstants.text.bubbleInsets.left, y: layoutConstants.bubble.contentInsets.top + replyInfoOriginY), size: replyInfoSizeApply.0)
                    } else {
                        strongSelf.replyInfoNode?.removeFromSupernode()
                        strongSelf.replyInfoNode = nil
                    }
                    
                    if removedContentNodeIndices?.count ?? 0 != 0 || addedContentNodes?.count ?? 0 != 0 {
                        var updatedContentNodes = strongSelf.contentNodes
                        
                        if let removedContentNodeIndices = removedContentNodeIndices {
                            for index in removedContentNodeIndices.reversed() {
                                updatedContentNodes[index].removeFromSupernode()
                                let _ = updatedContentNodes.remove(at: index)
                            }
                        }
                        
                        if let addedContentNodes = addedContentNodes {
                            for contentNode in addedContentNodes {
                                updatedContentNodes.append(contentNode)
                                // FIX: route through addContentNode(node:) (previously
                                // addSubnode was called directly, which bypassed the
                                // transition clipping node while a frame transition was
                                // active, letting new content draw outside the bubble).
                                strongSelf.addContentNode(node: contentNode)
                                contentNode.controllerInteraction = strongSelf.controllerInteraction
                            }
                        }
                        
                        strongSelf.contentNodes = updatedContentNodes
                    }
                    
                    var contentNodeOrigin = contentOrigin
                    var contentNodeIndex = 0
                    for (size, properties, apply) in contentNodeSizesPropertiesAndApply {
                        apply()
                        if contentNodeIndex == 0 && headerSize.height > CGFloat(FLT_EPSILON) {
                            contentNodeOrigin.y += properties.headerSpacing
                        }
                        let contentNode = strongSelf.contentNodes[contentNodeIndex]
                        let contentNodeFrame = CGRect(origin: contentNodeOrigin, size: size)
                        let previousContentNodeFrame = contentNode.frame
                        contentNode.frame = contentNodeFrame
                        
                        if case let .System(duration) = animation {
                            var animateFrame = false
                            var animateAlpha = false
                            if let addedContentNodes = addedContentNodes {
                                if !addedContentNodes.contains(where: { $0 === contentNode }) {
                                    animateFrame = true
                                } else {
                                    animateAlpha = true
                                }
                            } else {
                                animateFrame = true
                            }
                            
                            if animateFrame {
                                contentNode.layer.animateFrame(from: previousContentNodeFrame, to: contentNodeFrame, duration: duration, timingFunction: kCAMediaTimingFunctionSpring)
                            } else if animateAlpha {
                                contentNode.animateInsertionIntoBubble(duration)
                                var previousAlignedContentNodeFrame = contentNodeFrame
                                previousAlignedContentNodeFrame.origin.x += backgroundFrame.size.width - strongSelf.backgroundNode.frame.size.width
                                contentNode.layer.animateFrame(from: previousAlignedContentNodeFrame, to: contentNodeFrame, duration: duration, timingFunction: kCAMediaTimingFunctionSpring)
                            }
                        }
                        contentNodeIndex += 1
                        contentNodeOrigin.y += size.height
                    }
                    
                    // NOTE(review): the inverted construction (top: mergedBottom,
                    // bottom: mergedTop) pairs with the inverted image mapping in
                    // ChatMessageBackground.setType; keep both sides in sync.
                    let mergeType = ChatMessageBackgroundMergeType(top: mergedBottom, bottom: mergedTop)
                    if !incoming {
                        strongSelf.backgroundNode.setType(type: .Outgoing(mergeType))
                    } else {
                        strongSelf.backgroundNode.setType(type: .Incoming(mergeType))
                    }
                    
                    if case .System = animation {
                        strongSelf.backgroundFrameTransition = (strongSelf.backgroundNode.frame, backgroundFrame)
                        strongSelf.enableTransitionClippingNode()
                    } else {
                        if let _ = strongSelf.backgroundFrameTransition {
                            strongSelf.animateFrameTransition(1.0)
                            strongSelf.backgroundFrameTransition = nil
                        }
                        strongSelf.backgroundNode.frame = backgroundFrame
                        strongSelf.disableTransitionClippingNode()
                    }
                }
            })
        }
    }
    
    /// Attaches a content node to the active transition clipping container,
    /// or directly to self when no transition is in flight.
    private func addContentNode(node: ChatMessageBubbleContentNode) {
        if let transitionClippingNode = self.transitionClippingNode {
            transitionClippingNode.addSubnode(node)
        } else {
            self.addSubnode(node)
        }
    }
    
    /// Reparents content nodes into a clipping container matching the current
    /// background frame (inset 1pt vertically), used while the frame animates.
    private func enableTransitionClippingNode() {
        if self.transitionClippingNode == nil {
            let node = ASDisplayNode()
            node.clipsToBounds = true
            var backgroundFrame = self.backgroundNode.frame
            backgroundFrame = backgroundFrame.insetBy(dx: 0.0, dy: 1.0)
            node.frame = backgroundFrame
            // Bounds origin mirrors the frame origin so children keep their
            // absolute coordinates while clipped.
            node.bounds = CGRect(origin: CGPoint(x: backgroundFrame.origin.x, y: backgroundFrame.origin.y), size: backgroundFrame.size)
            for contentNode in self.contentNodes {
                node.addSubnode(contentNode)
            }
            self.addSubnode(node)
            self.transitionClippingNode = node
        }
    }
    
    /// Reparents content nodes back to self and removes the clipping container.
    private func disableTransitionClippingNode() {
        if let transitionClippingNode = self.transitionClippingNode {
            for contentNode in self.contentNodes {
                self.addSubnode(contentNode)
            }
            transitionClippingNode.removeFromSupernode()
            self.transitionClippingNode = nil
        }
    }
    
    override func animateFrameTransition(_ progress: CGFloat) {
        super.animateFrameTransition(progress)
        
        if let backgroundFrameTransition = self.backgroundFrameTransition {
            let backgroundFrame = CGRect.interpolator()(backgroundFrameTransition.0, backgroundFrameTransition.1, progress) as! CGRect
            self.backgroundNode.frame = backgroundFrame
            
            if let transitionClippingNode = self.transitionClippingNode {
                var fixedBackgroundFrame = backgroundFrame
                fixedBackgroundFrame = fixedBackgroundFrame.insetBy(dx: 0.0, dy: 1.0)
                
                transitionClippingNode.frame = fixedBackgroundFrame
                transitionClippingNode.bounds = CGRect(origin: CGPoint(x: fixedBackgroundFrame.origin.x, y: fixedBackgroundFrame.origin.y), size: fixedBackgroundFrame.size)
                
                if progress >= 1.0 - CGFloat(FLT_EPSILON) {
                    self.disableTransitionClippingNode()
                }
            }
        }
    }
    
    /// Tap handling: a tap inside the reply header navigates to the replied-to
    /// message via the controller interaction.
    @objc func tapGesture(_ recognizer: UITapGestureRecognizer) {
        switch recognizer.state {
            case .ended:
                let location = recognizer.location(in: self.view)
                if let replyInfoNode = self.replyInfoNode, replyInfoNode.frame.contains(location) {
                    if let item = self.item {
                        for attribute in item.message.attributes {
                            if let attribute = attribute as? ReplyMessageAttribute {
                                self.controllerInteraction?.testNavigateToMessage(item.message.id, attribute.messageId)
                                break
                            }
                        }
                    }
                }
            default:
                break
        }
    }
    
    override func transitionNode(id: MessageId, media: Media) -> ASDisplayNode? {
        if let item = self.item, item.message.id == id {
            for contentNode in self.contentNodes {
                if let result = contentNode.transitionNode(media: media) {
                    return result
                }
            }
        }
        return nil
    }
    
    override func updateHiddenMedia() {
        if let item = self.item, let controllerInteraction = self.controllerInteraction {
            for contentNode in self.contentNodes {
                contentNode.updateHiddenMedia(controllerInteraction.hiddenMedia[item.message.id])
            }
        }
    }
}
{ + return generateImage(CGSize(width: 11.0, height: 11.0), contextGenerator: { size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setStrokeColor(UIColor(0x42b649).cgColor) + context.setFillColor(UIColor(0x42b649).cgColor) + let strokeWidth: CGFloat = 1.0 + context.setLineWidth(strokeWidth) + context.strokeEllipse(in: CGRect(x: strokeWidth / 2.0, y: strokeWidth / 2.0, width: size.width - strokeWidth, height: size.height - strokeWidth)) + context.fill(CGRect(x: (11.0 - strokeWidth) / 2.0, y: strokeWidth * 3.0, width: strokeWidth, height: 11.0 / 2.0 - strokeWidth * 3.0)) + }) +} + +private func generateClockMinImage() -> UIImage? { + return generateImage(CGSize(width: 11.0, height: 11.0), contextGenerator: { size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(0x42b649).cgColor) + let strokeWidth: CGFloat = 1.0 + context.fill(CGRect(x: (11.0 - strokeWidth) / 2.0, y: (11.0 - strokeWidth) / 2.0, width: 11.0 / 2.0 - strokeWidth, height: strokeWidth)) + }) +} + +private func maybeAddRotationAnimation(_ layer: CALayer, duration: Double) { + if let _ = layer.animation(forKey: "clockFrameAnimation") { + return + } + + let basicAnimation = CABasicAnimation(keyPath: "transform.rotation.z") + basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionEaseInEaseOut) + basicAnimation.duration = duration + basicAnimation.fromValue = NSNumber(value: Float(0.0)) + basicAnimation.toValue = NSNumber(value: Float(M_PI * 2.0)) + basicAnimation.repeatCount = Float.infinity + basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionLinear) + layer.add(basicAnimation, forKey: "clockFrameAnimation") +} + +private let checkFullImage = generateCheckImage(partial: false) +private let checkPartialImage = generateCheckImage(partial: true) + +private let incomingDateColor = UIColor(0x525252, 0.6) +private let outgoingDateColor = UIColor(0x008c09, 0.8) + +private let 
clockFrameImage = generateClockFrameImage() +private let clockMinImage = generateClockMinImage() + +enum ChatMessageDateAndStatusOutgoingType { + case Sent(read: Bool) + case Sending + case Failed +} + +enum ChatMessageDateAndStatusType { + case BubbleIncoming + case BubbleOutgoing(ChatMessageDateAndStatusOutgoingType) +} + +class ChatMessageDateAndStatusNode: ASTransformLayerNode { + private var checkSentNode: ASImageNode? + private var checkReadNode: ASImageNode? + private var clockFrameNode: ASImageNode? + private var clockMinNode: ASImageNode? + private let dateNode: TextNode + + override init() { + self.dateNode = TextNode() + self.dateNode.isLayerBacked = true + self.dateNode.displaysAsynchronously = true + + super.init() + + self.addSubnode(self.dateNode) + } + + func asyncLayout() -> (_ dateText: String, _ type: ChatMessageDateAndStatusType, _ constrainedSize: CGSize) -> (CGSize, () -> Void) { + let dateLayout = TextNode.asyncLayout(self.dateNode) + + var checkReadNode = self.checkReadNode + var checkSentNode = self.checkSentNode + var clockFrameNode = self.clockFrameNode + var clockMinNode = self.clockMinNode + + return { dateText, type, constrainedSize in + let dateColor: UIColor + var outgoingStatus: ChatMessageDateAndStatusOutgoingType? + switch type { + case .BubbleIncoming: + dateColor = incomingDateColor + case let .BubbleOutgoing(status): + dateColor = outgoingDateColor + outgoingStatus = status + } + + let (date, dateApply) = dateLayout(NSAttributedString(string: dateText, font: dateFont, textColor: dateColor), nil, 1, .end, constrainedSize, nil) + + let leftInset: CGFloat = 10.0 + + let statusWidth: CGFloat + + var checkSentFrame: CGRect? + var checkReadFrame: CGRect? 
+ + var clockPosition = CGPoint() + + let loadedCheckFullImage = checkFullImage + let loadedCheckPartialImage = checkPartialImage + + if let outgoingStatus = outgoingStatus { + switch outgoingStatus { + case .Sending: + statusWidth = 13.0 + + if checkReadNode == nil { + checkReadNode = ASImageNode() + checkReadNode?.isLayerBacked = true + checkReadNode?.displaysAsynchronously = false + checkReadNode?.displayWithoutProcessing = true + } + + if checkSentNode == nil { + checkSentNode = ASImageNode() + checkSentNode?.isLayerBacked = true + checkSentNode?.displaysAsynchronously = false + checkSentNode?.displayWithoutProcessing = true + } + + if clockFrameNode == nil { + clockFrameNode = ASImageNode() + clockFrameNode?.isLayerBacked = true + clockFrameNode?.displaysAsynchronously = false + clockFrameNode?.displayWithoutProcessing = true + clockFrameNode?.image = clockFrameImage + clockFrameNode?.frame = CGRect(origin: CGPoint(), size: clockFrameImage?.size ?? CGSize()) + } + + if clockMinNode == nil { + clockMinNode = ASImageNode() + clockMinNode?.isLayerBacked = true + clockMinNode?.displaysAsynchronously = false + clockMinNode?.displayWithoutProcessing = true + clockMinNode?.image = clockMinImage + clockMinNode?.frame = CGRect(origin: CGPoint(), size: clockMinImage?.size ?? 
CGSize()) + } + clockPosition = CGPoint(x: leftInset + date.size.width + 8.5, y: 7.5) + case let .Sent(read): + statusWidth = 13.0 + + if checkReadNode == nil { + checkReadNode = ASImageNode() + checkReadNode?.isLayerBacked = true + checkReadNode?.displaysAsynchronously = false + checkReadNode?.displayWithoutProcessing = true + } + + if checkSentNode == nil { + checkSentNode = ASImageNode() + checkSentNode?.isLayerBacked = true + checkSentNode?.displaysAsynchronously = false + checkSentNode?.displayWithoutProcessing = true + } + + clockFrameNode = nil + clockMinNode = nil + + let checkSize = checkFullImage!.size + + checkSentFrame = CGRect(origin: CGPoint(x: leftInset + date.size.width + 5.0 + statusWidth - checkSize.width, y: 3.0), size: checkSize) + if read { + checkReadFrame = CGRect(origin: CGPoint(x: checkSentFrame!.origin.x - 6.0, y: checkSentFrame!.origin.y), size: checkSize) + } + case .Failed: + statusWidth = 0.0 + + checkReadNode = nil + checkSentNode = nil + clockFrameNode = nil + clockMinNode = nil + } + } else { + statusWidth = 0.0 + + checkReadNode = nil + checkSentNode = nil + clockFrameNode = nil + clockMinNode = nil + } + + return (CGSize(width: leftInset + date.size.width + statusWidth, height: date.size.height), { [weak self] in + if let strongSelf = self { + let _ = dateApply() + + strongSelf.dateNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 0.0), size: date.size) + + if let clockFrameNode = clockFrameNode { + if strongSelf.clockFrameNode == nil { + strongSelf.clockFrameNode = clockFrameNode + strongSelf.addSubnode(clockFrameNode) + } + clockFrameNode.position = clockPosition + if let clockFrameNode = strongSelf.clockFrameNode { + maybeAddRotationAnimation(clockFrameNode.layer, duration: 6.0) + } + } else if let clockFrameNode = strongSelf.clockFrameNode { + clockFrameNode.removeFromSupernode() + strongSelf.clockFrameNode = nil + } + + if let clockMinNode = clockMinNode { + if strongSelf.clockMinNode == nil { + strongSelf.clockMinNode = 
clockMinNode + strongSelf.addSubnode(clockMinNode) + } + clockMinNode.position = clockPosition + if let clockMinNode = strongSelf.clockMinNode { + maybeAddRotationAnimation(clockMinNode.layer, duration: 1.0) + } + } else if let clockMinNode = strongSelf.clockMinNode { + clockMinNode.removeFromSupernode() + strongSelf.clockMinNode = nil + } + + if let checkSentNode = checkSentNode, let checkReadNode = checkReadNode { + if strongSelf.checkSentNode == nil { + strongSelf.checkSentNode = checkSentNode + strongSelf.addSubnode(checkSentNode) + } + checkSentNode.image = loadedCheckPartialImage + + if let checkSentFrame = checkSentFrame { + checkSentNode.isHidden = false + checkSentNode.frame = checkSentFrame + } else { + checkSentNode.isHidden = true + } + + if strongSelf.checkReadNode == nil { + strongSelf.checkReadNode = checkReadNode + strongSelf.addSubnode(checkReadNode) + } + checkReadNode.image = loadedCheckFullImage + + if let checkReadFrame = checkReadFrame { + checkReadNode.isHidden = false + checkReadNode.frame = checkReadFrame + } else { + checkReadNode.isHidden = true + } + } else if let checkSentNode = strongSelf.checkSentNode, let checkReadNode = strongSelf.checkReadNode { + checkSentNode.removeFromSupernode() + checkReadNode.removeFromSupernode() + strongSelf.checkSentNode = nil + strongSelf.checkReadNode = nil + } + } + }) + } + } +} diff --git a/TelegramUI/ChatMessageFileBubbleContentNode.swift b/TelegramUI/ChatMessageFileBubbleContentNode.swift new file mode 100644 index 0000000000..43be54acba --- /dev/null +++ b/TelegramUI/ChatMessageFileBubbleContentNode.swift @@ -0,0 +1,73 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +class ChatMessageFileBubbleContentNode: ChatMessageBubbleContentNode { + private let interactiveFileNode: ChatMessageInteractiveFileNode + + private var item: ChatMessageItem? 
+ + required init() { + self.interactiveFileNode = ChatMessageInteractiveFileNode() + + super.init() + + self.addSubnode(self.interactiveFileNode) + + self.interactiveFileNode.activateLocalContent = { [weak self] in + if let strongSelf = self { + if let item = strongSelf.item, let controllerInteraction = strongSelf.controllerInteraction { + controllerInteraction.openMessage(item.message.id) + } + } + } + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + override func asyncLayoutContent() -> (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) { + let interactiveFileLayout = self.interactiveFileNode.asyncLayout() + + return { item, layoutConstants, position, constrainedSize in + var selectedFile: TelegramMediaFile? + for media in item.message.media { + if let telegramFile = media as? 
TelegramMediaFile { + selectedFile = telegramFile + } + } + + let (initialWidth, refineLayout) = interactiveFileLayout(item.account, selectedFile!, item.message.flags.contains(.Incoming), CGSize(width: constrainedSize.width, height: constrainedSize.height)) + + return (initialWidth + layoutConstants.file.bubbleInsets.left + layoutConstants.file.bubbleInsets.right, { constrainedSize in + let (refinedWidth, finishLayout) = refineLayout(constrainedSize) + + return (refinedWidth + layoutConstants.file.bubbleInsets.left + layoutConstants.file.bubbleInsets.right, { boundingWidth in + let (fileSize, fileApply) = finishLayout(boundingWidth - layoutConstants.file.bubbleInsets.left - layoutConstants.file.bubbleInsets.right) + + return (CGSize(width: fileSize.width + layoutConstants.file.bubbleInsets.left + layoutConstants.file.bubbleInsets.right, height: fileSize.height + layoutConstants.file.bubbleInsets.top + layoutConstants.file.bubbleInsets.bottom), { [weak self] in + if let strongSelf = self { + strongSelf.item = item + + strongSelf.interactiveFileNode.frame = CGRect(origin: CGPoint(x: layoutConstants.file.bubbleInsets.left, y: layoutConstants.file.bubbleInsets.top), size: fileSize) + + fileApply() + } + }) + }) + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + self.interactiveFileNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + self.interactiveFileNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } +} diff --git a/TelegramUI/ChatMessageForwardInfoNode.swift b/TelegramUI/ChatMessageForwardInfoNode.swift new file mode 100644 index 0000000000..fba362ea88 --- /dev/null +++ b/TelegramUI/ChatMessageForwardInfoNode.swift @@ -0,0 +1,53 @@ +import Foundation +import AsyncDisplayKit +import Display +import Postbox + +private let prefixFont = Font.regular(13.0) +private let peerFont = Font.medium(13.0) + +class 
ChatMessageForwardInfoNode: ASTransformLayerNode { + private var textNode: TextNode? + + override init() { + super.init() + } + + class func asyncLayout(_ maybeNode: ChatMessageForwardInfoNode?) -> (_ incoming: Bool, _ peer: Peer, _ authorPeer: Peer?, _ constrainedSize: CGSize) -> (CGSize, () -> ChatMessageForwardInfoNode) { + let textNodeLayout = TextNode.asyncLayout(maybeNode?.textNode) + + return { incoming, peer, authorPeer, constrainedSize in + let prefix: NSString = "Forwarded Message\nFrom: " + let peerString: String + if let authorPeer = authorPeer { + peerString = "\(peer.displayTitle) (\(authorPeer.displayTitle))" + } else { + peerString = peer.displayTitle + } + let completeString: NSString = "\(prefix)\(peerString)" as NSString + let color = incoming ? UIColor(0x007bff) : UIColor(0x00a516) + let string = NSMutableAttributedString(string: completeString as String, attributes: [NSForegroundColorAttributeName: color, NSFontAttributeName: prefixFont]) + string.addAttributes([NSFontAttributeName: peerFont], range: NSMakeRange(prefix.length, completeString.length - prefix.length)) + let (textLayout, textApply) = textNodeLayout(string, nil, 2, .end, constrainedSize, nil) + + return (textLayout.size, { + let node: ChatMessageForwardInfoNode + if let maybeNode = maybeNode { + node = maybeNode + } else { + node = ChatMessageForwardInfoNode() + } + + let textNode = textApply() + if node.textNode == nil { + textNode.isLayerBacked = true + node.textNode = textNode + node.addSubnode(textNode) + } + textNode.frame = CGRect(origin: CGPoint(), size: textLayout.size) + + return node + }) + } + } +} diff --git a/TelegramUI/ChatMessageInteractiveFileNode.swift b/TelegramUI/ChatMessageInteractiveFileNode.swift new file mode 100644 index 0000000000..fa233b45e1 --- /dev/null +++ b/TelegramUI/ChatMessageInteractiveFileNode.swift @@ -0,0 +1,247 @@ +import Foundation +import AsyncDisplayKit +import Postbox +import SwiftSignalKit +import Display +import TelegramCore + +private 
struct FetchControls { + let fetch: () -> Void + let cancel: () -> Void +} + +private let titleFont = Font.regular(16.0) +private let descriptionFont = Font.regular(13.0) + +private let incomingTitleColor = UIColor(0x0b8bed) +private let outgoingTitleColor = UIColor(0x3faa3c) +private let incomingDescriptionColor = UIColor(0x999999) +private let outgoingDescriptionColor = UIColor(0x6fb26a) + +private let fileIconIncomingImage = UIImage(bundleImageName: "Chat/Message/RadialProgressIconDocumentIncoming")?.precomposed() +private let fileIconOutgoingImage = UIImage(bundleImageName: "Chat/Message/RadialProgressIconDocumentOutgoing")?.precomposed() + +final class ChatMessageInteractiveFileNode: ASTransformNode { + private let titleNode: TextNode + private let descriptionNode: TextNode + + private var iconNode: TransformImageNode? + private var progressNode: RadialProgressNode? + private var tapRecognizer: UITapGestureRecognizer? + + private let statusDisposable = MetaDisposable() + private let fetchControls = Atomic(value: nil) + private var fetchStatus: MediaResourceStatus? + private let fetchDisposable = MetaDisposable() + + var activateLocalContent: () -> Void = { } + private var file: TelegramMediaFile? 
+ + init() { + self.titleNode = TextNode() + self.titleNode.displaysAsynchronously = true + self.titleNode.isLayerBacked = true + + self.descriptionNode = TextNode() + self.descriptionNode.displaysAsynchronously = true + self.descriptionNode.isLayerBacked = true + + super.init(layerBacked: false) + + self.addSubnode(self.titleNode) + self.addSubnode(self.descriptionNode) + } + + deinit { + self.statusDisposable.dispose() + self.fetchDisposable.dispose() + } + + override func didLoad() { + let tapRecognizer = UITapGestureRecognizer(target: self, action: #selector(self.fileTap(_:))) + self.view.addGestureRecognizer(tapRecognizer) + self.tapRecognizer = tapRecognizer + } + + @objc func progressPressed() { + if let fetchStatus = self.fetchStatus { + switch fetchStatus { + case .Fetching: + if let cancel = self.fetchControls.with({ return $0?.cancel }) { + cancel() + } + case .Remote: + if let fetch = self.fetchControls.with({ return $0?.fetch }) { + fetch() + } + case .Local: + break + } + } + } + + @objc func fileTap(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + if let fetchStatus = self.fetchStatus, case .Local = fetchStatus { + self.activateLocalContent() + } else { + self.activateLocalContent() + //self.progressPressed() + } + } + } + + func asyncLayout() -> (_ account: Account, _ file: TelegramMediaFile, _ incoming: Bool, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) { + let currentFile = self.file + + let titleAsyncLayout = TextNode.asyncLayout(self.titleNode) + let descriptionAsyncLayout = TextNode.asyncLayout(self.descriptionNode) + + return { account, file, incoming, constrainedSize in + return (CGFloat.greatestFiniteMagnitude, { constrainedSize in + //var updateImageSignal: Signal DrawingContext, NoError>? + var updatedStatusSignal: Signal? + var updatedFetchControls: FetchControls? 
+ + var mediaUpdated = false + if let currentFile = currentFile { + mediaUpdated = file != currentFile + } else { + mediaUpdated = true + } + + if mediaUpdated { + //updateImageSignal = chatMessagePhoto(account, photo: image) + updatedStatusSignal = chatMessageFileStatus(account: account, file: file) + updatedFetchControls = FetchControls(fetch: { [weak self] in + if let strongSelf = self { + strongSelf.fetchDisposable.set(chatMessageFileInteractiveFetched(account: account, file: file).start()) + } + }, cancel: { + chatMessageFileCancelInteractiveFetch(account: account, file: file) + }) + } + + var candidateTitleString: NSAttributedString? + var candidateDescriptionString: NSAttributedString? + + for attribute in file.attributes { + if case let .Audio(_, _, title, performer, _) = attribute { + candidateTitleString = NSAttributedString(string: title ?? "Unknown Track", font: titleFont, textColor: incoming ? incomingTitleColor : outgoingTitleColor) + candidateDescriptionString = NSAttributedString(string: performer ?? dataSizeString(file.size), font: descriptionFont, textColor:incoming ? incomingDescriptionColor : outgoingDescriptionColor) + break + } + } + + var titleString: NSAttributedString + let descriptionString: NSAttributedString + + if let candidateTitleString = candidateTitleString { + titleString = candidateTitleString + } else { + titleString = NSAttributedString(string: file.fileName ?? "File", font: titleFont, textColor: incoming ? incomingTitleColor : outgoingTitleColor) + } + + if let candidateDescriptionString = candidateDescriptionString { + descriptionString = candidateDescriptionString + } else { + descriptionString = NSAttributedString(string: dataSizeString(file.size), font: descriptionFont, textColor:incoming ? 
incomingDescriptionColor : outgoingDescriptionColor) + } + + let textConstrainedSize = CGSize(width: constrainedSize.width - 44.0 - 8.0, height: constrainedSize.height) + + let (titleLayout, titleApply) = titleAsyncLayout(titleString, nil, 1, .middle, textConstrainedSize, nil) + let (descriptionLayout, descriptionApply) = descriptionAsyncLayout(descriptionString, nil, 1, .middle, textConstrainedSize, nil) + + return (max(titleLayout.size.width, descriptionLayout.size.width) + 44.0 + 8.0, { boundingWidth in + let progressFrame = CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: CGSize(width: 44.0, height: 44.0)) + + let titleAndDescriptionHeight = titleLayout.size.height - 1.0 + descriptionLayout.size.height + + let titleFrame = CGRect(origin: CGPoint(x: progressFrame.maxX + 8.0, y: floor((44.0 - titleAndDescriptionHeight) / 2.0)), size: titleLayout.size) + let descriptionFrame = CGRect(origin: CGPoint(x: titleFrame.minX, y: titleFrame.maxY - 1.0), size: descriptionLayout.size) + + return (titleFrame.union(descriptionFrame).union(progressFrame).size, { [weak self] in + if let strongSelf = self { + strongSelf.file = file + + let _ = titleApply() + let _ = descriptionApply() + + strongSelf.titleNode.frame = titleFrame + strongSelf.descriptionNode.frame = descriptionFrame + + /*if let updateImageSignal = updateImageSignal { + strongSelf.imageNode.setSignal(account, signal: updateImageSignal) + }*/ + + if let updatedStatusSignal = updatedStatusSignal { + strongSelf.statusDisposable.set((updatedStatusSignal |> deliverOnMainQueue).start(next: { [weak strongSelf] status in + displayLinkDispatcher.dispatch { + if let strongSelf = strongSelf { + strongSelf.fetchStatus = status + + if strongSelf.progressNode == nil { + let progressNode = RadialProgressNode(theme: RadialProgressTheme(backgroundColor: UIColor(incoming ? 0x1195f2 : 0x3fc33b), foregroundColor: incoming ? UIColor.white : UIColor(0xe1ffc7), icon: incoming ? 
fileIconIncomingImage : fileIconOutgoingImage)) + strongSelf.progressNode = progressNode + progressNode.frame = progressFrame + strongSelf.addSubnode(progressNode) + } + + switch status { + case let .Fetching(progress): + strongSelf.progressNode?.state = .Fetching(progress: progress) + case .Local: + strongSelf.progressNode?.state = .Play + case .Remote: + strongSelf.progressNode?.state = .Remote + } + } + } + })) + } + + strongSelf.progressNode?.frame = progressFrame + + if let updatedFetchControls = updatedFetchControls { + let _ = strongSelf.fetchControls.swap(updatedFetchControls) + } + } + }) + }) + }) + } + } + + static func asyncLayout(_ node: ChatMessageInteractiveFileNode?) -> (_ account: Account, _ file: TelegramMediaFile, _ incoming: Bool, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> ChatMessageInteractiveFileNode))) { + let currentAsyncLayout = node?.asyncLayout() + + return { account, file, incoming, constrainedSize in + var fileNode: ChatMessageInteractiveFileNode + var fileLayout: (_ account: Account, _ file: TelegramMediaFile, _ incoming: Bool, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) + + if let node = node, let currentAsyncLayout = currentAsyncLayout { + fileNode = node + fileLayout = currentAsyncLayout + } else { + fileNode = ChatMessageInteractiveFileNode() + fileLayout = fileNode.asyncLayout() + } + + let (initialWidth, continueLayout) = fileLayout(account, file, incoming, constrainedSize) + + return (initialWidth, { constrainedSize in + let (finalWidth, finalLayout) = continueLayout(constrainedSize) + + return (finalWidth, { boundingWidth in + let (finalSize, apply) = finalLayout(boundingWidth) + + return (finalSize, { + apply() + return fileNode + }) + }) + }) + } + } +} diff --git a/TelegramUI/ChatMessageInteractiveMediaNode.swift b/TelegramUI/ChatMessageInteractiveMediaNode.swift new file mode 100644 index 0000000000..9e3711184d --- 
/dev/null +++ b/TelegramUI/ChatMessageInteractiveMediaNode.swift @@ -0,0 +1,260 @@ +import Foundation +import AsyncDisplayKit +import Postbox +import SwiftSignalKit +import Display +import TelegramCore + +private struct FetchControls { + let fetch: () -> Void + let cancel: () -> Void +} + +final class ChatMessageInteractiveMediaNode: ASTransformNode { + private let imageNode: TransformImageNode + private var progressNode: RadialProgressNode? + private var tapRecognizer: UITapGestureRecognizer? + + private var media: Media? + + private let statusDisposable = MetaDisposable() + private let fetchControls = Atomic(value: nil) + private var fetchStatus: MediaResourceStatus? + private let fetchDisposable = MetaDisposable() + + var activateLocalContent: () -> Void = { } + + init() { + self.imageNode = TransformImageNode() + + super.init(layerBacked: false) + + self.imageNode.displaysAsynchronously = false + self.addSubnode(self.imageNode) + } + + deinit { + self.statusDisposable.dispose() + self.fetchDisposable.dispose() + } + + override func didLoad() { + let tapRecognizer = UITapGestureRecognizer(target: self, action: #selector(self.imageTap(_:))) + self.imageNode.view.addGestureRecognizer(tapRecognizer) + self.tapRecognizer = tapRecognizer + } + + @objc func progressPressed() { + if let fetchStatus = self.fetchStatus { + switch fetchStatus { + case .Fetching: + if let cancel = self.fetchControls.with({ return $0?.cancel }) { + cancel() + } + case .Remote: + if let fetch = self.fetchControls.with({ return $0?.fetch }) { + fetch() + } + case .Local: + break + } + } + } + + @objc func imageTap(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + if let file = media as? 
TelegramMediaFile, (file.isVideo || file.mimeType.hasPrefix("video/")) { + self.activateLocalContent() + } else { + if let fetchStatus = self.fetchStatus, case .Local = fetchStatus { + self.activateLocalContent() + } else { + self.progressPressed() + } + } + } + } + + func asyncLayout() -> (_ account: Account, _ media: Media, _ corners: ImageCorners, _ automaticDownload: Bool, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) { + let currentMedia = self.media + let imageLayout = self.imageNode.asyncLayout() + + return { account, media, corners, automaticDownload, constrainedSize in + var initialBoundingSize: CGSize + var nativeSize: CGSize + + if let image = media as? TelegramMediaImage, let dimensions = largestImageRepresentation(image.representations)?.dimensions { + initialBoundingSize = dimensions.fitted(CGSize(width: min(200.0, constrainedSize.width - 60.0), height: 200.0)) + nativeSize = CGSize(width: floor(dimensions.width * 0.5), height: floor(dimensions.height * 0.5)).fitted(constrainedSize) + } else if let file = media as? TelegramMediaFile, let dimensions = file.dimensions { + initialBoundingSize = dimensions.fitted(CGSize(width: min(200.0, constrainedSize.width - 60.0), height: 200.0)) + nativeSize = CGSize(width: floor(dimensions.width * 0.5), height: floor(dimensions.height * 0.5)).fitted(constrainedSize) + } else { + initialBoundingSize = CGSize(width: 32.0, height: 32.0) + nativeSize = initialBoundingSize + } + + initialBoundingSize.width = max(initialBoundingSize.width, 60.0) + initialBoundingSize.height = max(initialBoundingSize.height, 60.0) + nativeSize.width = max(nativeSize.width, 60.0) + nativeSize.height = max(nativeSize.height, 60.0) + + return (nativeSize.width, { constrainedSize in + let boundingSize = initialBoundingSize.fitted(constrainedSize) + + var updateImageSignal: Signal<(TransformImageArguments) -> DrawingContext, NoError>? + var updatedStatusSignal: Signal? 
+ var updatedFetchControls: FetchControls? + + var mediaUpdated = false + if let currentMedia = currentMedia { + mediaUpdated = !media.isEqual(currentMedia) + } else { + mediaUpdated = true + } + + if mediaUpdated { + if let image = media as? TelegramMediaImage { + updateImageSignal = chatMessagePhoto(account: account, photo: image) + updatedStatusSignal = chatMessagePhotoStatus(account: account, photo: image) + updatedFetchControls = FetchControls(fetch: { [weak self] in + if let strongSelf = self { + strongSelf.fetchDisposable.set(chatMessagePhotoInteractiveFetched(account: account, photo: image).start()) + } + }, cancel: { + chatMessagePhotoCancelInteractiveFetch(account: account, photo: image) + }) + } else if let file = media as? TelegramMediaFile { + updateImageSignal = chatMessageVideo(account: account, video: file) + updatedStatusSignal = chatMessageFileStatus(account: account, file: file) + updatedFetchControls = FetchControls(fetch: { [weak self] in + if let strongSelf = self { + strongSelf.fetchDisposable.set(chatMessageFileInteractiveFetched(account: account, file: file).start()) + } + }, cancel: { + chatMessageFileCancelInteractiveFetch(account: account, file: file) + }) + } + } + + let arguments = TransformImageArguments(corners: corners, imageSize: boundingSize, boundingSize: boundingSize, intrinsicInsets: UIEdgeInsets()) + + let imageFrame = CGRect(origin: CGPoint(x: -arguments.insets.left, y: -arguments.insets.top), size: arguments.drawingSize) + + return (boundingSize.width, { boundingWidth in + let adjustedWidth = boundingWidth + let adjustedHeight = boundingSize.aspectFitted(CGSize(width: adjustedWidth, height: CGFloat.greatestFiniteMagnitude)).height + let adjustedImageSize = CGSize(width: adjustedWidth, height: min(adjustedHeight, floorToScreenPixels(boundingSize.height * 1.4))) + + let adjustedArguments = TransformImageArguments(corners: corners, imageSize: nativeSize, boundingSize: adjustedImageSize, intrinsicInsets: UIEdgeInsets()) + + let 
adjustedImageFrame = CGRect(origin: imageFrame.origin, size: adjustedArguments.drawingSize) + let imageApply = imageLayout(adjustedArguments) + + return (CGSize(width: adjustedImageSize.width, height: adjustedImageSize.height), { [weak self] in + if let strongSelf = self { + strongSelf.media = media + strongSelf.imageNode.frame = adjustedImageFrame + strongSelf.progressNode?.position = CGPoint(x: adjustedImageFrame.midX, y: adjustedImageFrame.midY) + + if let updateImageSignal = updateImageSignal { + strongSelf.imageNode.setSignal(account: account, signal: updateImageSignal) + } + + if let updatedStatusSignal = updatedStatusSignal { + strongSelf.statusDisposable.set((updatedStatusSignal |> deliverOnMainQueue).start(next: { [weak strongSelf] status in + displayLinkDispatcher.dispatch { + if let strongSelf = strongSelf { + strongSelf.fetchStatus = status + + if let file = media as? TelegramMediaFile, (file.isVideo || file.mimeType.hasPrefix("video/")) { + if let progressNode = strongSelf.progressNode { + progressNode.removeFromSupernode() + strongSelf.progressNode = nil + } + } else { + if case .Local = status { + if let progressNode = strongSelf.progressNode { + progressNode.removeFromSupernode() + strongSelf.progressNode = nil + } + } else { + if strongSelf.progressNode == nil { + let progressNode = RadialProgressNode() + progressNode.frame = CGRect(origin: CGPoint(), size: CGSize(width: 50.0, height: 50.0)) + progressNode.position = strongSelf.imageNode.position + strongSelf.progressNode = progressNode + strongSelf.addSubnode(progressNode) + } + } + + switch status { + case let .Fetching(progress): + strongSelf.progressNode?.state = .Fetching(progress: progress) + case .Local: + var state: RadialProgressState = .None + if let file = media as? 
TelegramMediaFile { + if file.isVideo { + state = .Play + } + } + strongSelf.progressNode?.state = state + case .Remote: + strongSelf.progressNode?.state = .Remote + } + } + } + } + })) + } + + if let updatedFetchControls = updatedFetchControls { + let _ = strongSelf.fetchControls.swap(updatedFetchControls) + if automaticDownload { + if let image = media as? TelegramMediaImage { + strongSelf.fetchDisposable.set(chatMessagePhotoInteractiveFetched(account: account, photo: image).start()) + } + } + } + + imageApply() + } + }) + }) + }) + } + } + + static func asyncLayout(_ node: ChatMessageInteractiveMediaNode?) -> (_ account: Account, _ media: Media, _ corners: ImageCorners, _ automaticDownload: Bool, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> ChatMessageInteractiveMediaNode))) { + let currentAsyncLayout = node?.asyncLayout() + + return { account, media, corners, automaticDownload, constrainedSize in + var imageNode: ChatMessageInteractiveMediaNode + var imageLayout: (_ account: Account, _ media: Media, _ corners: ImageCorners, _ automaticDownload: Bool, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) + + if let node = node, let currentAsyncLayout = currentAsyncLayout { + imageNode = node + imageLayout = currentAsyncLayout + } else { + imageNode = ChatMessageInteractiveMediaNode() + imageLayout = imageNode.asyncLayout() + } + + let (initialWidth, continueLayout) = imageLayout(account, media, corners, automaticDownload, constrainedSize) + + return (initialWidth, { constrainedSize in + let (finalWidth, finalLayout) = continueLayout(constrainedSize) + + return (finalWidth, { boundingWidth in + let (finalSize, apply) = finalLayout(boundingWidth) + + return (finalSize, { + apply() + return imageNode + }) + }) + }) + } + } +} + diff --git a/TelegramUI/ChatMessageItem.swift b/TelegramUI/ChatMessageItem.swift new file mode 100644 index 0000000000..7ed5442a9d --- /dev/null +++ 
b/TelegramUI/ChatMessageItem.swift @@ -0,0 +1,147 @@ +import Foundation +import UIKit +import Postbox +import AsyncDisplayKit +import Display +import SwiftSignalKit +import TelegramCore + +private func mediaIsNotMergeable(_ media: Media) -> Bool { + if let file = media as? TelegramMediaFile, file.isSticker { + return true + } + if let _ = media as? TelegramMediaAction { + return true + } + + return false +} + +private func messagesShouldBeMerged(_ lhs: Message, _ rhs: Message) -> Bool { + if abs(lhs.timestamp - rhs.timestamp) < 5 * 60 && lhs.author?.id == rhs.author?.id { + for media in lhs.media { + if mediaIsNotMergeable(media) { + return false + } + } + for media in rhs.media { + if mediaIsNotMergeable(media) { + return false + } + } + + return true + } + + return false +} + +public class ChatMessageItem: ListViewItem, CustomStringConvertible { + let account: Account + let peerId: PeerId + let controllerInteraction: ChatControllerInteraction + let message: Message + + public let accessoryItem: ListViewAccessoryItem? + + public init(account: Account, peerId: PeerId, controllerInteraction: ChatControllerInteraction, message: Message) { + self.account = account + self.peerId = peerId + self.controllerInteraction = controllerInteraction + self.message = message + + var accessoryItem: ListViewAccessoryItem? 
+ let incoming = account.peerId != message.author?.id + let displayAuthorInfo = incoming && message.author != nil && peerId.isGroup + + if displayAuthorInfo { + var hasActionMedia = false + for media in message.media { + if media is TelegramMediaAction { + hasActionMedia = true + break + } + } + if !hasActionMedia { + if let author = message.author { + accessoryItem = ChatMessageAvatarAccessoryItem(account: account, peerId: author.id, peer: author, messageTimestamp: message.timestamp) + } + } + } + self.accessoryItem = accessoryItem + } + + public func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + var viewClassName: AnyClass = ChatMessageBubbleItemNode.self + + for media in message.media { + if let telegramFile = media as? TelegramMediaFile, telegramFile.isSticker { + viewClassName = ChatMessageStickerItemNode.self + } else if let _ = media as? TelegramMediaAction { + viewClassName = ChatMessageActionItemNode.self + } + } + + let configure = { () -> Void in + let node = (viewClassName as! ChatMessageItemView.Type).init() + node.controllerInteraction = self.controllerInteraction + node.setupItem(self) + + let nodeLayout = node.asyncLayout() + let (top, bottom) = self.mergedWithItems(top: previousItem, bottom: nextItem) + let (layout, apply) = nodeLayout(self, width, top, bottom) + + node.contentSize = layout.contentSize + node.insets = layout.insets + + completion(node, { + apply(.None) + }) + } + if Thread.isMainThread { + async { + configure() + } + } else { + configure() + } + } + + final func mergedWithItems(top: ListViewItem?, bottom: ListViewItem?) -> (top: Bool, bottom: Bool) { + var mergedTop = false + var mergedBottom = false + if let top = top as? ChatMessageItem { + mergedBottom = messagesShouldBeMerged(message, top.message) + } + if let bottom = bottom as? 
ChatMessageItem { + mergedTop = messagesShouldBeMerged(message, bottom.message) + } + + return (mergedTop, mergedBottom) + } + + public func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? ChatMessageItemView { + Queue.mainQueue().async { + node.setupItem(self) + + let nodeLayout = node.asyncLayout() + + async { + let (top, bottom) = self.mergedWithItems(top: previousItem, bottom: nextItem) + + let (layout, apply) = nodeLayout(self, width, top, bottom) + Queue.mainQueue().async { + completion(layout, { + apply(animation) + }) + } + } + } + } + } + + public var description: String { + return "(ChatMessageItem id: \(self.message.id), text: \"\(self.message.text)\")" + } +} diff --git a/TelegramUI/ChatMessageItemView.swift b/TelegramUI/ChatMessageItemView.swift new file mode 100644 index 0000000000..51f0dc2486 --- /dev/null +++ b/TelegramUI/ChatMessageItemView.swift @@ -0,0 +1,120 @@ +import Foundation +import AsyncDisplayKit +import Display +import Postbox + +struct ChatMessageItemBubbleLayoutConstants { + let edgeInset: CGFloat + let defaultSpacing: CGFloat + let mergedSpacing: CGFloat + let maximumWidthFillFactor: CGFloat + let minimumSize: CGSize + let contentInsets: UIEdgeInsets +} + +struct ChatMessageItemTextLayoutConstants { + let bubbleInsets: UIEdgeInsets +} + +struct ChatMessageItemImageLayoutConstants { + let bubbleInsets: UIEdgeInsets + let defaultCornerRadius: CGFloat + let mergedCornerRadius: CGFloat + let contentMergedCornerRadius: CGFloat +} + +struct ChatMessageItemFileLayoutConstants { + let bubbleInsets: UIEdgeInsets +} + +struct ChatMessageItemLayoutConstants { + let avatarDiameter: CGFloat + + let bubble: ChatMessageItemBubbleLayoutConstants + let image: ChatMessageItemImageLayoutConstants + let 
text: ChatMessageItemTextLayoutConstants + let file: ChatMessageItemFileLayoutConstants + + init() { + self.avatarDiameter = 37.0 + + self.bubble = ChatMessageItemBubbleLayoutConstants(edgeInset: 4.0, defaultSpacing: 2.5, mergedSpacing: 0.0, maximumWidthFillFactor: 0.9, minimumSize: CGSize(width: 40.0, height: 33.0), contentInsets: UIEdgeInsets(top: 1.0, left: 6.0, bottom: 1.0, right: 1.0)) + self.text = ChatMessageItemTextLayoutConstants(bubbleInsets: UIEdgeInsets(top: 5.0, left: 9.0, bottom: 4.0, right: 9.0)) + self.image = ChatMessageItemImageLayoutConstants(bubbleInsets: UIEdgeInsets(top: 1.0, left: 1.0, bottom: 1.0, right: 1.0), defaultCornerRadius: 15.0, mergedCornerRadius: 4.0, contentMergedCornerRadius: 2.0) + self.file = ChatMessageItemFileLayoutConstants(bubbleInsets: UIEdgeInsets(top: 15.0, left: 9.0, bottom: 15.0, right: 12.0)) + } +} + +let defaultChatMessageItemLayoutConstants = ChatMessageItemLayoutConstants() + +public class ChatMessageItemView: ListViewItemNode { + let layoutConstants = defaultChatMessageItemLayoutConstants + + var item: ChatMessageItem? + var controllerInteraction: ChatControllerInteraction? + + public required convenience init() { + self.init(layerBacked: true) + } + + public init(layerBacked: Bool) { + super.init(layerBacked: layerBacked, dynamicBounce: true) + + self.transform = CATransform3DMakeRotation(CGFloat(M_PI), 0.0, 0.0, 1.0) + } + + required public init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + override public func reuse() { + super.reuse() + + self.item = nil + self.frame = CGRect() + } + + func setupItem(_ item: ChatMessageItem) { + self.item = item + } + + override public func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + if let item = item as? 
ChatMessageItem { + let doLayout = self.asyncLayout() + let merged = item.mergedWithItems(top: previousItem, bottom: nextItem) + let (layout, apply) = doLayout(item, width, merged.top, merged.bottom) + self.contentSize = layout.contentSize + self.insets = layout.insets + apply(.None) + } + } + + override public func layoutAccessoryItemNode(_ accessoryItemNode: ListViewAccessoryItemNode) { + if let avatarNode = accessoryItemNode as? ChatMessageAvatarAccessoryItemNode { + avatarNode.frame = CGRect(origin: CGPoint(x: 3.0, y: self.bounds.height - 38.0 - self.insets.top + 1.0), size: CGSize(width: 38.0, height: 38.0)) + } + } + + override public func animateInsertion(_ currentTimestamp: Double, duration: Double) { + super.animateInsertion(currentTimestamp, duration: duration) + + self.transitionOffset = -self.bounds.size.height * 1.6 + self.addTransitionOffsetAnimation(0.0, duration: duration, beginAt: currentTimestamp) + //self.layer.animateBoundsOriginYAdditive(from: -self.bounds.size.height * 1.4, to: 0.0, duration: duration) + } + + func asyncLayout() -> (_ item: ChatMessageItem, _ width: CGFloat, _ mergedTop: Bool, _ mergedBottom: Bool) -> (ListViewItemNodeLayout, (ListViewItemUpdateAnimation) -> Void) { + return { _, _, _, _ in + return (ListViewItemNodeLayout(contentSize: CGSize(width: 32.0, height: 32.0), insets: UIEdgeInsets()), { _ in + + }) + } + } + + func transitionNode(id: MessageId, media: Media) -> ASDisplayNode? 
{ + return nil + } + + func updateHiddenMedia() { + } +} diff --git a/TelegramUI/ChatMessageMediaBubbleContentNode.swift b/TelegramUI/ChatMessageMediaBubbleContentNode.swift new file mode 100644 index 0000000000..0d83060aad --- /dev/null +++ b/TelegramUI/ChatMessageMediaBubbleContentNode.swift @@ -0,0 +1,104 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +class ChatMessageMediaBubbleContentNode: ChatMessageBubbleContentNode { + override var properties: ChatMessageBubbleContentProperties { + return ChatMessageBubbleContentProperties(hidesSimpleAuthorHeader: true, headerSpacing: 5.0) + } + + private let interactiveImageNode: ChatMessageInteractiveMediaNode + + private var item: ChatMessageItem? + private var media: Media? + + required init() { + self.interactiveImageNode = ChatMessageInteractiveMediaNode() + + super.init() + + self.addSubnode(self.interactiveImageNode) + + self.interactiveImageNode.activateLocalContent = { [weak self] in + if let strongSelf = self { + if let item = strongSelf.item, let controllerInteraction = strongSelf.controllerInteraction { + controllerInteraction.openMessage(item.message.id) + } + } + } + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + override func asyncLayoutContent() -> (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) { + let interactiveImageLayout = self.interactiveImageNode.asyncLayout() + + return { item, layoutConstants, position, constrainedSize in + var selectedMedia: Media? + for media in item.message.media { + if let telegramImage = media as? TelegramMediaImage { + selectedMedia = telegramImage + } else if let telegramFile = media as? 
TelegramMediaFile { + selectedMedia = telegramFile + } + } + + let imageCorners = chatMessageBubbleImageContentCorners(relativeContentPosition: position, normalRadius: layoutConstants.image.defaultCornerRadius, mergedRadius: layoutConstants.image.mergedCornerRadius, mergedWithAnotherContentRadius: layoutConstants.image.contentMergedCornerRadius) + + let (initialWidth, refineLayout) = interactiveImageLayout(item.account, selectedMedia!, imageCorners, item.account.settings.automaticDownloadSettingsForPeerId(item.peerId).downloadPhoto, CGSize(width: constrainedSize.width, height: constrainedSize.height)) + + return (initialWidth + layoutConstants.image.bubbleInsets.left + layoutConstants.image.bubbleInsets.right, { constrainedSize in + let (refinedWidth, finishLayout) = refineLayout(constrainedSize) + + return (refinedWidth + layoutConstants.image.bubbleInsets.left + layoutConstants.image.bubbleInsets.right, { boundingWidth in + let (imageSize, imageApply) = finishLayout(boundingWidth - layoutConstants.image.bubbleInsets.left - layoutConstants.image.bubbleInsets.right) + + return (CGSize(width: imageSize.width + layoutConstants.image.bubbleInsets.left + layoutConstants.image.bubbleInsets.right, height: imageSize.height + layoutConstants.image.bubbleInsets.top + layoutConstants.image.bubbleInsets.bottom), { [weak self] in + if let strongSelf = self { + strongSelf.item = item + strongSelf.media = selectedMedia + + strongSelf.interactiveImageNode.frame = CGRect(origin: CGPoint(x: layoutConstants.image.bubbleInsets.left, y: layoutConstants.image.bubbleInsets.top), size: imageSize) + + imageApply() + } + }) + }) + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + self.interactiveImageNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + self.interactiveImageNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func 
transitionNode(media: Media) -> ASDisplayNode? { + if let currentMedia = self.media, currentMedia.isEqual(media) { + return self.interactiveImageNode + } + return nil + } + + override func updateHiddenMedia(_ media: [Media]?) { + var mediaHidden = false + if let currentMedia = self.media, let media = media { + for item in media { + if item.isEqual(currentMedia) { + mediaHidden = true + break + } + } + } + + self.interactiveImageNode.isHidden = mediaHidden + } +} diff --git a/TelegramUI/ChatMessageReplyInfoNode.swift b/TelegramUI/ChatMessageReplyInfoNode.swift new file mode 100644 index 0000000000..03b21c1033 --- /dev/null +++ b/TelegramUI/ChatMessageReplyInfoNode.swift @@ -0,0 +1,96 @@ +import Foundation +import AsyncDisplayKit +import Postbox +import Display + +private let titleFont: UIFont = { + if #available(iOS 8.2, *) { + return UIFont.systemFont(ofSize: 14.0, weight: UIFontWeightMedium) + } else { + return CTFontCreateWithName("HelveticaNeue-Medium" as CFString?, 14.0, nil) + } +}() +private let textFont = Font.regular(14.0) + +class ChatMessageReplyInfoNode: ASTransformLayerNode { + private let contentNode: ASDisplayNode + private let lineNode: ASDisplayNode + private var titleNode: TextNode? + private var textNode: TextNode? + + override init() { + self.contentNode = ASDisplayNode() + self.contentNode.displaysAsynchronously = true + self.contentNode.isLayerBacked = true + self.contentNode.shouldRasterizeDescendants = true + self.contentNode.contentMode = .left + self.contentNode.contentsScale = UIScreenScale + + self.lineNode = ASDisplayNode() + self.lineNode.displaysAsynchronously = false + self.lineNode.isLayerBacked = true + + super.init() + + self.addSubnode(self.contentNode) + self.contentNode.addSubnode(self.lineNode) + } + + class func asyncLayout(_ maybeNode: ChatMessageReplyInfoNode?) 
-> (_ incoming: Bool, _ message: Message, _ constrainedSize: CGSize) -> (CGSize, () -> ChatMessageReplyInfoNode) { + + let titleNodeLayout = TextNode.asyncLayout(maybeNode?.titleNode) + let textNodeLayout = TextNode.asyncLayout(maybeNode?.textNode) + + return { incoming, message, constrainedSize in + let titleString = message.author?.displayTitle ?? "" + let textString = message.text + let titleColor = incoming ? UIColor(0x007bff) : UIColor(0x00a516) + + let leftInset: CGFloat = 10.0 + let lineColor = incoming ? UIColor(0x3ca7fe) : UIColor(0x29cc10) + + let maximumTextWidth = max(0.0, constrainedSize.width - leftInset) + + let contrainedTextSize = CGSize(width: maximumTextWidth, height: constrainedSize.height) + + let (titleLayout, titleApply) = titleNodeLayout(NSAttributedString(string: titleString, font: titleFont, textColor: titleColor), nil, 1, .end, contrainedTextSize, nil) + let (textLayout, textApply) = textNodeLayout(NSAttributedString(string: textString, font: textFont, textColor: UIColor.black), nil, 1, .end, contrainedTextSize, nil) + + let size = CGSize(width: max(titleLayout.size.width, textLayout.size.width) + leftInset, height: titleLayout.size.height + textLayout.size.height) + + return (size, { + let node: ChatMessageReplyInfoNode + if let maybeNode = maybeNode { + node = maybeNode + } else { + node = ChatMessageReplyInfoNode() + } + + let titleNode = titleApply() + let textNode = textApply() + + if node.titleNode == nil { + titleNode.isLayerBacked = true + node.titleNode = titleNode + node.contentNode.addSubnode(titleNode) + } + + if node.textNode == nil { + textNode.isLayerBacked = true + node.textNode = textNode + node.contentNode.addSubnode(textNode) + } + + titleNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 0.0), size: titleLayout.size) + textNode.frame = CGRect(origin: CGPoint(x: leftInset, y: titleLayout.size.height), size: textLayout.size) + + node.lineNode.backgroundColor = lineColor + node.lineNode.frame = CGRect(origin: CGPoint(x: 
1.0, y: 2.5), size: CGSize(width: 2.0, height: size.height - 3.0)) + + node.contentNode.frame = CGRect(origin: CGPoint(), size: size) + + return node + }) + } + } +} diff --git a/TelegramUI/ChatMessageStickerItemNode.swift b/TelegramUI/ChatMessageStickerItemNode.swift new file mode 100644 index 0000000000..00d8455d40 --- /dev/null +++ b/TelegramUI/ChatMessageStickerItemNode.swift @@ -0,0 +1,98 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +class ChatMessageStickerItemNode: ChatMessageItemView { + let imageNode: TransformImageNode + var progressNode: RadialProgressNode? + var tapRecognizer: UITapGestureRecognizer? + + var telegramFile: TelegramMediaFile? + + private let fetchDisposable = MetaDisposable() + + required init() { + self.imageNode = TransformImageNode() + + super.init(layerBacked: false) + + self.imageNode.displaysAsynchronously = false + self.addSubnode(self.imageNode) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.fetchDisposable.dispose() + } + + override func setupItem(_ item: ChatMessageItem) { + super.setupItem(item) + + for media in item.message.media { + if let telegramFile = media as? 
TelegramMediaFile { + if self.telegramFile != telegramFile { + self.telegramFile = telegramFile + + let signal = chatMessageSticker(account: item.account, file: telegramFile) + self.imageNode.setSignal(account: item.account, signal: signal) + self.fetchDisposable.set(fileInteractiveFetched(account: item.account, file: telegramFile).start()) + } + + break + } + } + } + + override func asyncLayout() -> (_ item: ChatMessageItem, _ width: CGFloat, _ mergedTop: Bool, _ mergedBottom: Bool) -> (ListViewItemNodeLayout, (ListViewItemUpdateAnimation) -> Void) { + let displaySize = CGSize(width: 200.0, height: 200.0) + let telegramFile = self.telegramFile + let layoutConstants = self.layoutConstants + let imageLayout = self.imageNode.asyncLayout() + + return { item, width, mergedTop, mergedBottom in + let incoming = item.account.peerId != item.message.author?.id + var imageSize: CGSize = CGSize(width: 100.0, height: 100.0) + if let telegramFile = telegramFile { + if let thumbnailSize = telegramFile.previewRepresentations.first?.dimensions { + imageSize = thumbnailSize.aspectFitted(displaySize) + } + } + + let avatarInset: CGFloat = (item.peerId.isGroup && item.message.author != nil) ? layoutConstants.avatarDiameter : 0.0 + + let layoutInsets = UIEdgeInsets(top: mergedTop ? layoutConstants.bubble.mergedSpacing : layoutConstants.bubble.defaultSpacing, left: 0.0, bottom: mergedBottom ? layoutConstants.bubble.mergedSpacing : layoutConstants.bubble.defaultSpacing, right: 0.0) + + let imageFrame = CGRect(origin: CGPoint(x: (incoming ? 
(layoutConstants.bubble.edgeInset + avatarInset) : (width - imageSize.width - layoutConstants.bubble.edgeInset)), y: 0.0), size: imageSize) + + let arguments = TransformImageArguments(corners: ImageCorners(), imageSize: imageFrame.size, boundingSize: imageFrame.size, intrinsicInsets: UIEdgeInsets()) + + let imageApply = imageLayout(arguments) + + return (ListViewItemNodeLayout(contentSize: CGSize(width: width, height: imageSize.height), insets: layoutInsets), { [weak self] animation in + if let strongSelf = self { + strongSelf.imageNode.frame = imageFrame + strongSelf.progressNode?.position = strongSelf.imageNode.position + imageApply() + } + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + super.animateInsertion(currentTimestamp, duration: duration) + + self.imageNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + super.animateAdded(currentTimestamp, duration: duration) + + self.imageNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } +} diff --git a/TelegramUI/ChatMessageTextBubbleContentNode.swift b/TelegramUI/ChatMessageTextBubbleContentNode.swift new file mode 100644 index 0000000000..64ac5f6439 --- /dev/null +++ b/TelegramUI/ChatMessageTextBubbleContentNode.swift @@ -0,0 +1,170 @@ +import Foundation +import AsyncDisplayKit +import Display +import TelegramCore + +private let messageFont: UIFont = UIFont.systemFont(ofSize: 17.0) +private let messageBoldFont: UIFont = UIFont.boldSystemFont(ofSize: 17.0) + +class ChatMessageTextBubbleContentNode: ChatMessageBubbleContentNode { + private let textNode: TextNode + private let statusNode: ChatMessageDateAndStatusNode + + required init() { + self.textNode = TextNode() + self.statusNode = ChatMessageDateAndStatusNode() + + super.init() + + self.textNode.isLayerBacked = true + self.textNode.contentMode = .topLeft + self.textNode.contentsScale = UIScreenScale + 
self.textNode.displaysAsynchronously = true + self.addSubnode(self.textNode) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + override func asyncLayoutContent() -> (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) { + let textLayout = TextNode.asyncLayout(self.textNode) + let statusLayout = self.statusNode.asyncLayout() + + return { item, layoutConstants, position, _ in + return (CGFloat.greatestFiniteMagnitude, { constrainedSize in + let message = item.message + + let incoming = item.account.peerId != message.author?.id + + let horizontalInset = layoutConstants.text.bubbleInsets.left + layoutConstants.text.bubbleInsets.right + let textConstrainedSize = CGSize(width: constrainedSize.width - horizontalInset, height: constrainedSize.height) + + var t = Int(item.message.timestamp) + var timeinfo = tm() + localtime_r(&t, &timeinfo) + + let dateText = String(format: "%02d:%02d", arguments: [Int(timeinfo.tm_hour), Int(timeinfo.tm_min)]) + //let dateText = "\(message.id.id)" + + let statusType: ChatMessageDateAndStatusType? + if case .None = position.bottom { + if incoming { + statusType = .BubbleIncoming + } else { + if message.flags.contains(.Failed) { + statusType = .BubbleOutgoing(.Failed) + } else if message.flags.contains(.Unsent) { + statusType = .BubbleOutgoing(.Sending) + } else { + statusType = .BubbleOutgoing(.Sent(read: true)) + } + } + } else { + statusType = nil + } + + var statusSize: CGSize? + var statusApply: (() -> Void)? + + if let statusType = statusType { + let (size, apply) = statusLayout(dateText, statusType, textConstrainedSize) + statusSize = size + statusApply = apply + } + + let attributedText: NSAttributedString + var entities: TextEntitiesMessageAttribute? 
+ for attribute in item.message.attributes { + if let attribute = attribute as? TextEntitiesMessageAttribute { + entities = attribute + break + } + } + if let entities = entities { + let string = NSMutableAttributedString(string: message.text, attributes: [NSFontAttributeName: messageFont, NSForegroundColorAttributeName: UIColor.black]) + for entity in entities.entities { + switch entity.type { + case .Url: + string.addAttribute(NSForegroundColorAttributeName, value: UIColor(0x004bad), range: NSRange(location: entity.range.lowerBound, length: entity.range.upperBound - entity.range.lowerBound)) + case .Bold: + string.addAttribute(NSFontAttributeName, value: messageBoldFont, range: NSRange(location: entity.range.lowerBound, length: entity.range.upperBound - entity.range.lowerBound)) + default: + break + } + } + attributedText = string + } else { + attributedText = NSAttributedString(string: message.text, font: messageFont, textColor: UIColor.black) + } + + let (textLayout, textApply) = textLayout(attributedText, nil, 0, .end, textConstrainedSize, nil) + + var textFrame = CGRect(origin: CGPoint(), size: textLayout.size) + let textSize = textLayout.size + + var statusFrame: CGRect? 
+ if let statusSize = statusSize { + var frame = CGRect(origin: CGPoint(), size: statusSize) + + let trailingLineWidth = textLayout.trailingLineWidth + if textSize.width - trailingLineWidth >= statusSize.width { + frame.origin = CGPoint(x: textFrame.maxX - statusSize.width, y: textFrame.maxY - statusSize.height) + } else if trailingLineWidth + statusSize.width < textConstrainedSize.width { + frame.origin = CGPoint(x: textFrame.minX + trailingLineWidth, y: textFrame.maxY - statusSize.height) + } else { + frame.origin = CGPoint(x: textFrame.maxX - statusSize.width, y: textFrame.maxY) + } + statusFrame = frame + } + + textFrame = textFrame.offsetBy(dx: layoutConstants.text.bubbleInsets.left, dy: layoutConstants.text.bubbleInsets.top) + statusFrame = statusFrame?.offsetBy(dx: layoutConstants.text.bubbleInsets.left, dy: layoutConstants.text.bubbleInsets.top) + + var boundingSize: CGSize + if let statusFrame = statusFrame { + boundingSize = textFrame.union(statusFrame).size + } else { + boundingSize = textFrame.size + } + boundingSize.width += layoutConstants.text.bubbleInsets.left + layoutConstants.text.bubbleInsets.right + boundingSize.height += layoutConstants.text.bubbleInsets.top + layoutConstants.text.bubbleInsets.bottom + + return (boundingSize.width, { boundingWidth in + var adjustedStatusFrame: CGRect? 
+ if let statusFrame = statusFrame { + adjustedStatusFrame = CGRect(origin: CGPoint(x: boundingWidth - statusFrame.size.width - layoutConstants.text.bubbleInsets.right, y: statusFrame.origin.y), size: statusFrame.size) + } + + return (boundingSize, { [weak self] in + if let strongSelf = self { + let _ = textApply() + + if let statusApply = statusApply, let adjustedStatusFrame = adjustedStatusFrame { + strongSelf.statusNode.frame = adjustedStatusFrame + statusApply() + if strongSelf.statusNode.supernode == nil { + strongSelf.addSubnode(strongSelf.statusNode) + } + } else if strongSelf.statusNode.supernode != nil { + strongSelf.statusNode.removeFromSupernode() + } + + strongSelf.textNode.frame = textFrame + } + }) + }) + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + self.textNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + self.statusNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + self.textNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + self.statusNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } +} diff --git a/TelegramUI/ChatMessageWebpageBubbleContentNode.swift b/TelegramUI/ChatMessageWebpageBubbleContentNode.swift new file mode 100644 index 0000000000..be9bbe9c16 --- /dev/null +++ b/TelegramUI/ChatMessageWebpageBubbleContentNode.swift @@ -0,0 +1,392 @@ +import Foundation +import Postbox +import Display +import AsyncDisplayKit +import SwiftSignalKit +import TelegramCore + +private func generateLineImage(color: UIColor) -> UIImage? 
{ + return generateImage(CGSize(width: 2.0, height: 3.0), contextGenerator: { size, context in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(color.cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: CGSize(width: 2.0, height: 2.0))) + context.fillEllipse(in: CGRect(origin: CGPoint(x: 0.0, y: 1.0), size: CGSize(width: 2.0, height: 2.0))) + })?.stretchableImage(withLeftCapWidth: 0, topCapHeight: 1) +} + +private let incomingLineImage = generateLineImage(color: UIColor(0x3ca7fe)) +private let outgoingLineImage = generateLineImage(color: UIColor(0x29cc10)) + +private let incomingAccentColor = UIColor(0x3ca7fe) +private let outgoingAccentColor = UIColor(0x00a700) + +private let titleFont: UIFont = UIFont.boldSystemFont(ofSize: 15.0) +private let textFont: UIFont = UIFont.systemFont(ofSize: 15.0) + +final class ChatMessageWebpageBubbleContentNode: ChatMessageBubbleContentNode { + private let lineNode: ASImageNode + private let textNode: TextNode + private let inlineImageNode: TransformImageNode + private var contentImageNode: ChatMessageInteractiveMediaNode? + private var contentFileNode: ChatMessageInteractiveFileNode? + + private let statusNode: ChatMessageDateAndStatusNode + + private var image: TelegramMediaImage? 
+ + required init() { + self.lineNode = ASImageNode() + self.lineNode.isLayerBacked = true + self.lineNode.displaysAsynchronously = false + self.lineNode.displayWithoutProcessing = true + + self.textNode = TextNode() + self.textNode.isLayerBacked = true + self.textNode.displaysAsynchronously = true + self.textNode.contentsScale = UIScreenScale + self.textNode.contentMode = .topLeft + + self.inlineImageNode = TransformImageNode() + self.inlineImageNode.isLayerBacked = true + self.inlineImageNode.displaysAsynchronously = false + + self.statusNode = ChatMessageDateAndStatusNode() + + super.init() + + self.addSubnode(self.lineNode) + self.addSubnode(self.textNode) + + self.addSubnode(self.statusNode) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + override func asyncLayoutContent() -> (_ item: ChatMessageItem, _ layoutConstants: ChatMessageItemLayoutConstants, _ position: ChatMessageBubbleContentPosition, _ constrainedSize: CGSize) -> (CGFloat, (CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> Void))) { + let textAsyncLayout = TextNode.asyncLayout(self.textNode) + let currentImage = self.image + let imageLayout = self.inlineImageNode.asyncLayout() + let statusLayout = self.statusNode.asyncLayout() + let contentImageLayout = ChatMessageInteractiveMediaNode.asyncLayout(self.contentImageNode) + let contentFileLayout = ChatMessageInteractiveFileNode.asyncLayout(self.contentFileNode) + + return { item, layoutConstants, _, constrainedSize in + let insets = UIEdgeInsets(top: 0.0, left: 9.0 + 8.0, bottom: 5.0, right: 8.0) + + var webpage: TelegramMediaWebpageLoadedContent? + for media in item.message.media { + if let media = media as? TelegramMediaWebpage { + if case let .Loaded(content) = media.content { + webpage = content + } + break + } + } + + var textString: NSAttributedString? + var inlineImageDimensions: CGSize? + var inlineImageSize: CGSize? 
+ var updateInlineImageSignal: Signal<(TransformImageArguments) -> DrawingContext, NoError>? + var textCutout: TextNodeCutout? + var initialWidth: CGFloat = CGFloat.greatestFiniteMagnitude + var refineContentImageLayout: ((CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> ChatMessageInteractiveMediaNode)))? + var refineContentFileLayout: ((CGSize) -> (CGFloat, (CGFloat) -> (CGSize, () -> ChatMessageInteractiveFileNode)))? + + if let webpage = webpage { + let string = NSMutableAttributedString() + var notEmpty = false + + if let websiteName = webpage.websiteName, !websiteName.isEmpty { + string.append(NSAttributedString(string: websiteName, font: titleFont, textColor: item.message.flags.contains(.Incoming) ? incomingAccentColor : outgoingAccentColor)) + notEmpty = true + } + + if let title = webpage.title, !title.isEmpty { + if notEmpty { + string.append(NSAttributedString(string: "\n", font: textFont, textColor: UIColor.black)) + } + string.append(NSAttributedString(string: title, font: titleFont, textColor: UIColor.black)) + notEmpty = true + } + + if let text = webpage.text, !text.isEmpty { + if notEmpty { + string.append(NSAttributedString(string: "\n", font: textFont, textColor: UIColor.black)) + } + string.append(NSAttributedString(string: text + "\n", font: textFont, textColor: UIColor.black)) + notEmpty = true + } + + textString = string + + if let file = webpage.file { + if file.isVideo { + let (initialImageWidth, refineLayout) = contentImageLayout(item.account, file, ImageCorners(radius: 4.0), true, CGSize(width: constrainedSize.width - insets.left - insets.right, height: constrainedSize.height)) + initialWidth = initialImageWidth + insets.left + insets.right + refineContentImageLayout = refineLayout + } else { + let (_, refineLayout) = contentFileLayout(item.account, file, item.message.flags.contains(.Incoming), CGSize(width: constrainedSize.width - insets.left - insets.right, height: constrainedSize.height)) + refineContentFileLayout = refineLayout + } + 
} else if let image = webpage.image { + if let type = webpage.type, ["photo"].contains(type) { + let (initialImageWidth, refineLayout) = contentImageLayout(item.account, image, ImageCorners(radius: 4.0), true, CGSize(width: constrainedSize.width - insets.left - insets.right, height: constrainedSize.height)) + initialWidth = initialImageWidth + insets.left + insets.right + refineContentImageLayout = refineLayout + } else if let dimensions = largestImageRepresentation(image.representations)?.dimensions { + inlineImageDimensions = dimensions + + if image != currentImage { + updateInlineImageSignal = chatWebpageSnippetPhoto(account: item.account, photo: image) + } + } + } + } + + if let _ = inlineImageDimensions { + inlineImageSize = CGSize(width: 54.0, height: 54.0) + + if let inlineImageSize = inlineImageSize { + textCutout = TextNodeCutout(position: .TopRight, size: CGSize(width: inlineImageSize.width + 10.0, height: inlineImageSize.height + 10.0)) + } + } + + return (initialWidth, { constrainedSize in + var t = Int(item.message.timestamp) + var timeinfo = tm() + localtime_r(&t, &timeinfo) + + let dateText = String(format: "%02d:%02d", arguments: [Int(timeinfo.tm_hour), Int(timeinfo.tm_min)]) + //let dateText = "\(message.id.id)" + + let statusType: ChatMessageDateAndStatusType + if item.message.flags.contains(.Incoming) { + statusType = .BubbleIncoming + } else { + if item.message.flags.contains(.Failed) { + statusType = .BubbleOutgoing(.Failed) + } else if item.message.flags.contains(.Unsent) { + statusType = .BubbleOutgoing(.Sending) + } else { + statusType = .BubbleOutgoing(.Sent(read: true)) + } + } + + let textConstrainedSize = CGSize(width: constrainedSize.width - insets.left - insets.right, height: constrainedSize.height - insets.top - insets.bottom) + + var statusSizeAndApply: (CGSize, () -> Void)? 
+ + if refineContentImageLayout == nil && refineContentFileLayout == nil { + statusSizeAndApply = statusLayout(dateText, statusType, textConstrainedSize) + } + + let (textLayout, textApply) = textAsyncLayout(textString, nil, 12, .end, textConstrainedSize, textCutout) + + var textFrame = CGRect(origin: CGPoint(), size: textLayout.size) + + var statusFrame: CGRect? + + if let (statusSize, _) = statusSizeAndApply { + var frame = CGRect(origin: CGPoint(), size: statusSize) + + let trailingLineWidth = textLayout.trailingLineWidth + if textLayout.size.width - trailingLineWidth >= statusSize.width { + frame.origin = CGPoint(x: textFrame.maxX - statusSize.width, y: textFrame.maxY - statusSize.height) + } else if trailingLineWidth + statusSize.width < textConstrainedSize.width { + frame.origin = CGPoint(x: textFrame.minX + trailingLineWidth, y: textFrame.maxY - statusSize.height) + } else { + frame.origin = CGPoint(x: textFrame.maxX - statusSize.width, y: textFrame.maxY) + } + + if let inlineImageSize = inlineImageSize { + if frame.origin.y < inlineImageSize.height + 4.0 { + frame.origin.y = inlineImageSize.height + 4.0 + } + } + + frame = frame.offsetBy(dx: insets.left, dy: insets.top) + statusFrame = frame + } + + textFrame = textFrame.offsetBy(dx: insets.left, dy: insets.top) + + let lineImage = item.message.flags.contains(.Incoming) ? incomingLineImage : outgoingLineImage + + var boundingSize = textFrame.size + if let statusFrame = statusFrame { + boundingSize = textFrame.union(statusFrame).size + } + var lineHeight = textFrame.size.height + if let inlineImageSize = inlineImageSize { + if boundingSize.height < inlineImageSize.height { + boundingSize.height = inlineImageSize.height + } + if lineHeight < inlineImageSize.height { + lineHeight = inlineImageSize.height + } + } + + var finalizeContentImageLayout: ((CGFloat) -> (CGSize, () -> ChatMessageInteractiveMediaNode))? 
+ if let refineContentImageLayout = refineContentImageLayout { + let (refinedWidth, finalizeImageLayout) = refineContentImageLayout(textConstrainedSize) + finalizeContentImageLayout = finalizeImageLayout + + boundingSize.width = max(boundingSize.width, refinedWidth) + } + var finalizeContentFileLayout: ((CGFloat) -> (CGSize, () -> ChatMessageInteractiveFileNode))? + if let refineContentFileLayout = refineContentFileLayout { + let (refinedWidth, finalizeFileLayout) = refineContentFileLayout(textConstrainedSize) + finalizeContentFileLayout = finalizeFileLayout + + boundingSize.width = max(boundingSize.width, refinedWidth) + } + + boundingSize.width += insets.left + insets.right + boundingSize.height += insets.top + insets.bottom + lineHeight += insets.top + insets.bottom + + var imageApply: (() -> Void)? + if let inlineImageSize = inlineImageSize, let inlineImageDimensions = inlineImageDimensions { + let imageCorners = ImageCorners(topLeft: .Corner(4.0), topRight: .Corner(4.0), bottomLeft: .Corner(4.0), bottomRight: .Corner(4.0)) + let arguments = TransformImageArguments(corners: imageCorners, imageSize: inlineImageDimensions.aspectFilled(inlineImageSize), boundingSize: inlineImageSize, intrinsicInsets: UIEdgeInsets()) + imageApply = imageLayout(arguments) + } + + return (boundingSize.width, { boundingWidth in + var adjustedBoundingSize = boundingSize + var adjustedLineHeight = lineHeight + + var imageFrame: CGRect? + if let inlineImageSize = inlineImageSize { + imageFrame = CGRect(origin: CGPoint(x: boundingWidth - inlineImageSize.width - insets.right, y: 0.0), size: inlineImageSize) + } + + var contentImageSizeAndApply: (CGSize, () -> ChatMessageInteractiveMediaNode)? 
+ if let finalizeContentImageLayout = finalizeContentImageLayout { + let (size, apply) = finalizeContentImageLayout(boundingWidth - insets.left - insets.right) + contentImageSizeAndApply = (size, apply) + + var imageHeigthAddition = size.height + if textFrame.size.height > CGFloat(FLT_EPSILON) { + imageHeigthAddition += 2.0 + } + + adjustedBoundingSize.height += imageHeigthAddition + 5.0 + adjustedLineHeight += imageHeigthAddition + 4.0 + } + + var contentFileSizeAndApply: (CGSize, () -> ChatMessageInteractiveFileNode)? + if let finalizeContentFileLayout = finalizeContentFileLayout { + let (size, apply) = finalizeContentFileLayout(boundingWidth - insets.left - insets.right) + contentFileSizeAndApply = (size, apply) + + var imageHeigthAddition = size.height + if textFrame.size.height > CGFloat(FLT_EPSILON) { + imageHeigthAddition += 2.0 + } + + adjustedBoundingSize.height += imageHeigthAddition + 5.0 + adjustedLineHeight += imageHeigthAddition + 4.0 + } + + var adjustedStatusFrame: CGRect? 
+ if let statusFrame = statusFrame { + adjustedStatusFrame = CGRect(origin: CGPoint(x: boundingWidth - statusFrame.size.width - insets.right, y: statusFrame.origin.y), size: statusFrame.size) + } + + return (adjustedBoundingSize, { [weak self] in + if let strongSelf = self { + strongSelf.lineNode.image = lineImage + strongSelf.lineNode.frame = CGRect(origin: CGPoint(x: 9.0, y: 0.0), size: CGSize(width: 2.0, height: adjustedLineHeight - insets.top - insets.bottom - 2.0)) + + let _ = textApply() + strongSelf.textNode.frame = textFrame + + if let (_, statusApply) = statusSizeAndApply, let adjustedStatusFrame = adjustedStatusFrame { + strongSelf.statusNode.frame = adjustedStatusFrame + if strongSelf.statusNode.supernode == nil { + strongSelf.addSubnode(strongSelf.statusNode) + } + statusApply() + } else if strongSelf.statusNode.supernode != nil { + strongSelf.statusNode.removeFromSupernode() + } + + strongSelf.image = webpage?.image + + if let imageFrame = imageFrame { + if let updateImageSignal = updateInlineImageSignal { + strongSelf.inlineImageNode.setSignal(account: item.account, signal: updateImageSignal) + } + + strongSelf.inlineImageNode.frame = imageFrame + if strongSelf.inlineImageNode.supernode == nil { + strongSelf.addSubnode(strongSelf.inlineImageNode) + } + + if let imageApply = imageApply { + imageApply() + } + } else if strongSelf.inlineImageNode.supernode != nil { + strongSelf.inlineImageNode.removeFromSupernode() + } + + if let (contentImageSize, contentImageApply) = contentImageSizeAndApply { + let contentImageNode = contentImageApply() + if strongSelf.contentImageNode !== contentImageNode { + strongSelf.contentImageNode = contentImageNode + strongSelf.addSubnode(contentImageNode) + contentImageNode.activateLocalContent = { [weak strongSelf] in + if let strongSelf = strongSelf { + strongSelf.controllerInteraction?.openMessage(item.message.id) + } + } + } + let _ = contentImageApply() + contentImageNode.frame = CGRect(origin: CGPoint(x: insets.left, y: 
textFrame.maxY + (textFrame.size.height > CGFloat(FLT_EPSILON) ? 4.0 : 0.0)), size: contentImageSize) + } else if let contentImageNode = strongSelf.contentImageNode { + contentImageNode.removeFromSupernode() + strongSelf.contentImageNode = nil + } + + if let (contentFileSize, contentFileApply) = contentFileSizeAndApply { + let contentFileNode = contentFileApply() + if strongSelf.contentFileNode !== contentFileNode { + strongSelf.contentFileNode = contentFileNode + strongSelf.addSubnode(contentFileNode) + contentFileNode.activateLocalContent = { [weak strongSelf] in + if let strongSelf = strongSelf { + strongSelf.controllerInteraction?.openMessage(item.message.id) + } + } + } + let _ = contentFileApply() + contentFileNode.frame = CGRect(origin: CGPoint(x: insets.left, y: textFrame.maxY + (textFrame.size.height > CGFloat(FLT_EPSILON) ? 4.0 : 0.0)), size: contentFileSize) + } else if let contentFileNode = strongSelf.contentFileNode { + contentFileNode.removeFromSupernode() + strongSelf.contentFileNode = nil + } + } + }) + }) + }) + } + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) { + self.lineNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + self.lineNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func animateInsertionIntoBubble(_ duration: Double) { + self.lineNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.25) + self.textNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.25) + self.statusNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.3) + self.inlineImageNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.3) + } +} diff --git a/TelegramUI/ChatUnreadItem.swift b/TelegramUI/ChatUnreadItem.swift new file mode 100644 index 0000000000..3cad436c33 --- /dev/null +++ b/TelegramUI/ChatUnreadItem.swift @@ -0,0 +1,95 @@ +import Foundation +import UIKit +import Postbox +import AsyncDisplayKit 
+import Display + +private func backgroundImage() -> UIImage? { + return generateImage(CGSize(width: 1.0, height: 25.0), contextGenerator: { size, context -> Void in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(white: 0.0, alpha: 0.2).cgColor) + context.fill(CGRect(origin: CGPoint(), size: CGSize(width: size.width, height: UIScreenPixel))) + context.fill(CGRect(origin: CGPoint(x: 0.0, y: size.height - UIScreenPixel), size: CGSize(width: size.width, height: UIScreenPixel))) + context.setFillColor(UIColor(white: 1.0, alpha: 0.9).cgColor) + context.fill(CGRect(x: 0.0, y: UIScreenPixel, width: size.width, height: size.height - UIScreenPixel - UIScreenPixel)) + })?.stretchableImage(withLeftCapWidth: 8, topCapHeight: 8) +} + +private let titleFont = UIFont.systemFont(ofSize: 13.0) + +class ChatUnreadItem: ListViewItem { + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + + async { + let node = ChatUnreadItemNode() + node.layoutForWidth(width, item: self, previousItem: previousItem, nextItem: nextItem) + completion(node, {}) + } + } +} + +class ChatUnreadItemNode: ListViewItemNode { + let backgroundNode: ASImageNode + let labelNode: TextNode + + init() { + self.backgroundNode = ASImageNode() + self.backgroundNode.isLayerBacked = true + self.backgroundNode.displayWithoutProcessing = true + + self.labelNode = TextNode() + self.labelNode.isLayerBacked = true + + super.init(layerBacked: true) + + self.backgroundNode.image = backgroundImage() + self.addSubnode(self.backgroundNode) + + self.addSubnode(self.labelNode) + + self.transform = CATransform3DMakeRotation(CGFloat(M_PI), 0.0, 0.0, 1.0) + + self.scrollPositioningInsets = UIEdgeInsets(top: 5.0, left: 0.0, bottom: 5.0, right: 0.0) + } + + override func animateInsertion(_ currentTimestamp: Double, duration: Double) 
{ + super.animateInsertion(currentTimestamp, duration: duration) + + self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: duration) + self.labelNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: duration) + + //self.transitionOffset = -self.bounds.size.height * 1.6 + //self.addTransitionOffsetAnimation(0.0, duration: duration, beginAt: currentTimestamp) + //self.layer.animateBoundsOriginYAdditive(from: -self.bounds.size.height * 1.4, to: 0.0, duration: duration) + } + + override func animateAdded(_ currentTimestamp: Double, duration: Double) { + self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + self.labelNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.2) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + let (layout, apply) = self.asyncLayout()(width) + apply() + self.contentSize = layout.contentSize + self.insets = layout.insets + } + + func asyncLayout() -> (_ width: CGFloat) -> (ListViewItemNodeLayout, () -> Void) { + let labelLayout = TextNode.asyncLayout(self.labelNode) + return { width in + let (size, apply) = labelLayout(NSAttributedString(string: "Unread", font: titleFont, textColor: UIColor(0x86868d)), nil, 1, .end, CGSize(width: width, height: CGFloat.greatestFiniteMagnitude), nil) + + let backgroundSize = CGSize(width: width, height: 25.0) + + return (ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 25.0), insets: UIEdgeInsets(top: 5.0, left: 0.0, bottom: 5.0, right: 0.0)), { [weak self] in + if let strongSelf = self { + let _ = apply() + + strongSelf.backgroundNode.frame = CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: backgroundSize) + strongSelf.labelNode.frame = CGRect(origin: CGPoint(x: floorToScreenPixels((backgroundSize.width - size.size.width) / 2.0), y: floorToScreenPixels((backgroundSize.height - size.size.height) / 2.0) - 1.0), size: size.size) + } + }) + } + } +} diff --git 
a/TelegramUI/ChatVideoGalleryItem.swift b/TelegramUI/ChatVideoGalleryItem.swift new file mode 100644 index 0000000000..faa4313b94 --- /dev/null +++ b/TelegramUI/ChatVideoGalleryItem.swift @@ -0,0 +1,230 @@ +import Foundation +import Display +import AsyncDisplayKit +import SwiftSignalKit +import Postbox +import TelegramCore + +class ChatVideoGalleryItem: GalleryItem { + let account: Account + let message: Message + let location: MessageHistoryEntryLocation? + + init(account: Account, message: Message, location: MessageHistoryEntryLocation?) { + self.account = account + self.message = message + self.location = location + } + + func node() -> GalleryItemNode { + let node = ChatVideoGalleryItemNode() + + for media in self.message.media { + if let file = media as? TelegramMediaFile, (file.isVideo || file.mimeType.hasPrefix("video/")) { + node.setFile(account: account, file: file) + break + } + } + + if let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + + return node + } + + func updateNode(node: GalleryItemNode) { + if let node = node as? ChatVideoGalleryItemNode, let location = self.location { + node._title.set(.single("\(location.index + 1) of \(location.count)")) + } + } +} + +final class ChatVideoGalleryItemNode: ZoomableContentGalleryItemNode { + fileprivate let _ready = Promise() + fileprivate let _title = Promise() + fileprivate let _titleView = Promise() + + private var player: MediaPlayer? + private let snapshotNode: TransformImageNode + private let videoNode: MediaPlayerNode + private let scrubberView: ChatVideoGalleryItemScrubberView + + private var accountAndFile: (Account, TelegramMediaFile)? 
+ + private var isCentral = false + + private let videoStatusDisposable = MetaDisposable() + + override init() { + self.videoNode = MediaPlayerNode() + self.snapshotNode = TransformImageNode() + self.snapshotNode.backgroundColor = UIColor.black + self.videoNode.snapshotNode = snapshotNode + self.scrubberView = ChatVideoGalleryItemScrubberView() + + super.init() + + self.snapshotNode.imageUpdated = { [weak self] in + self?._ready.set(.single(Void())) + } + + self._titleView.set(.single(self.scrubberView)) + self.scrubberView.seek = { [weak self] timestamp in + self?.player?.seek(timestamp: timestamp) + } + } + + deinit { + self.videoStatusDisposable.dispose() + } + + override func ready() -> Signal { + return self._ready.get() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + } + + func setFile(account: Account, file: TelegramMediaFile) { + if self.accountAndFile == nil || !self.accountAndFile!.1.isEqual(file) { + if let largestSize = file.dimensions { + self.snapshotNode.alphaTransitionOnFirstUpdate = false + let displaySize = largestSize.dividedByScreenScale() + self.snapshotNode.asyncLayout()(TransformImageArguments(corners: ImageCorners(), imageSize: displaySize, boundingSize: displaySize, intrinsicInsets: UIEdgeInsets()))() + self.snapshotNode.setSignal(account: account, signal: chatMessageImageFile(account: account, file: file, progressive: true), dispatchOnDisplayLink: false) + self.zoomableContent = (largestSize, self.videoNode) + } else { + self._ready.set(.single(Void())) + } + + let shouldPlayVideo = self.accountAndFile?.1 != file + self.accountAndFile = (account, file) + if shouldPlayVideo && self.isCentral { + self.playVideo() + } + } + } + + private func playVideo() { + if let (account, file) = self.accountAndFile { + var dimensions: CGSize? 
= file.dimensions + if dimensions == nil || dimensions!.width.isLessThanOrEqualTo(0.0) || dimensions!.height.isLessThanOrEqualTo(0.0) { + dimensions = largestImageRepresentation(file.previewRepresentations)?.dimensions.aspectFitted(CGSize(width: 1920, height: 1080)) + } + if dimensions == nil || dimensions!.width.isLessThanOrEqualTo(0.0) || dimensions!.height.isLessThanOrEqualTo(0.0) { + dimensions = CGSize(width: 1920, height: 1080) + } + + if let dimensions = dimensions, !dimensions.width.isLessThanOrEqualTo(0.0) && !dimensions.height.isLessThanOrEqualTo(0.0) { + /*let source = VideoPlayerSource(account: account, resource: CloudFileMediaResource(location: file.location, size: file.size)) + self.videoNode.player = VideoPlayer(source: source)*/ + + let player = MediaPlayer(account: account, resource: CloudFileMediaResource(location: file.location, size: file.size)) + player.attachPlayerNode(self.videoNode) + self.player = player + self.videoStatusDisposable.set((player.status |> deliverOnMainQueue).start(next: { [weak self] status in + if let strongSelf = self { + strongSelf.scrubberView.setStatus(status) + } + })) + player.play() + + + self.zoomableContent = (dimensions, self.videoNode) + } + } + } + + private func stopVideo() { + self.player = nil + } + + override func centralityUpdated(isCentral: Bool) { + super.centralityUpdated(isCentral: isCentral) + + if self.isCentral != isCentral { + self.isCentral = isCentral + if isCentral { + self.playVideo() + } else { + self.stopVideo() + } + } + } + + override func animateIn(from node: ASDisplayNode) { + var transformedFrame = node.view.convert(node.view.bounds, to: self.videoNode.view) + let transformedSuperFrame = node.view.convert(node.view.bounds, to: self.videoNode.view.superview) + + self.videoNode.layer.animatePosition(from: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), to: self.videoNode.layer.position, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring) + + 
transformedFrame.origin = CGPoint() + + let transform = CATransform3DScale(self.videoNode.layer.transform, transformedFrame.size.width / self.videoNode.layer.bounds.size.width, transformedFrame.size.height / self.videoNode.layer.bounds.size.height, 1.0) + self.videoNode.layer.animate(from: NSValue(caTransform3D: transform), to: NSValue(caTransform3D: self.videoNode.layer.transform), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25) + } + + override func animateOut(to node: ASDisplayNode, completion: @escaping () -> Void) { + var transformedFrame = node.view.convert(node.view.bounds, to: self.videoNode.view) + let transformedSuperFrame = node.view.convert(node.view.bounds, to: self.videoNode.view.superview) + let transformedSelfFrame = node.view.convert(node.view.bounds, to: self.view) + let transformedCopyViewInitialFrame = self.videoNode.view.convert(self.videoNode.view.bounds, to: self.view) + + var positionCompleted = false + var boundsCompleted = false + var copyCompleted = false + + let copyView = node.view.snapshotContentTree()! 
+ + self.view.insertSubview(copyView, belowSubview: self.scrollView) + copyView.frame = transformedSelfFrame + + let intermediateCompletion = { [weak copyView] in + if positionCompleted && boundsCompleted && copyCompleted { + copyView?.removeFromSuperview() + completion() + } + } + + copyView.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.1, removeOnCompletion: false) + + copyView.layer.animatePosition(from: CGPoint(x: transformedCopyViewInitialFrame.midX, y: transformedCopyViewInitialFrame.midY), to: CGPoint(x: transformedSelfFrame.midX, y: transformedSelfFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false) + let scale = CGSize(width: transformedCopyViewInitialFrame.size.width / transformedSelfFrame.size.width, height: transformedCopyViewInitialFrame.size.height / transformedSelfFrame.size.height) + copyView.layer.animate(from: NSValue(caTransform3D: CATransform3DMakeScale(scale.width, scale.height, 1.0)), to: NSValue(caTransform3D: CATransform3DIdentity), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25, removeOnCompletion: false, completion: { _ in + copyCompleted = true + intermediateCompletion() + }) + + self.videoNode.layer.animatePosition(from: self.videoNode.layer.position, to: CGPoint(x: transformedSuperFrame.midX, y: transformedSuperFrame.midY), duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false, completion: { _ in + positionCompleted = true + intermediateCompletion() + }) + + self.videoNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.25, removeOnCompletion: false) + + self.videoNode.snapshotNode?.isHidden = true + + transformedFrame.origin = CGPoint() + /*self.videoNode.layer.animateBounds(from: self.videoNode.layer.bounds, to: transformedFrame, duration: 0.25, timingFunction: kCAMediaTimingFunctionSpring, removeOnCompletion: false, completion: { _ in + boundsCompleted = true + intermediateCompletion() + })*/ + + let 
transform = CATransform3DScale(self.videoNode.layer.transform, transformedFrame.size.width / self.videoNode.layer.bounds.size.width, transformedFrame.size.height / self.videoNode.layer.bounds.size.height, 1.0) + self.videoNode.layer.animate(from: NSValue(caTransform3D: self.videoNode.layer.transform), to: NSValue(caTransform3D: transform), keyPath: "transform", timingFunction: kCAMediaTimingFunctionSpring, duration: 0.25, removeOnCompletion: false, completion: { _ in + boundsCompleted = true + intermediateCompletion() + }) + } + + override func title() -> Signal { + //return self._title.get() + return .single("") + } + + override func titleView() -> Signal { + return self._titleView.get() + } +} diff --git a/TelegramUI/ChatVideoGalleryItemScrubberView.swift b/TelegramUI/ChatVideoGalleryItemScrubberView.swift new file mode 100644 index 0000000000..6b142e73ef --- /dev/null +++ b/TelegramUI/ChatVideoGalleryItemScrubberView.swift @@ -0,0 +1,93 @@ +import Foundation +import UIKit + +final class ChatVideoGalleryItemScrubberView: UIView { + private let backgroundView: UIView + private let foregroundView: UIView + private let handleView: UIView + + private var status: MediaPlayerStatus? 
+ + private var scrubbing = false + private var scrubbingLocation: CGFloat = 0.0 + private var initialScrubbingPosition: CGFloat = 0.0 + private var scrubbingPosition: CGFloat = 0.0 + + var seek: (Double) -> Void = { _ in } + + override init(frame: CGRect) { + self.backgroundView = UIView() + self.backgroundView.backgroundColor = UIColor.gray + self.backgroundView.clipsToBounds = true + self.foregroundView = UIView() + self.foregroundView.backgroundColor = UIColor.white + self.handleView = UIView() + self.handleView.backgroundColor = UIColor.white + + super.init(frame: frame) + + self.backgroundView.addSubview(self.foregroundView) + self.addSubview(self.backgroundView) + self.addSubview(self.handleView) + + self.addGestureRecognizer(UIPanGestureRecognizer(target: self, action: #selector(self.panGesture(_:)))) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + func setStatus(_ status: MediaPlayerStatus) { + self.status = status + self.layoutSubviews() + + if status.status == .playing { + + } + } + + @objc func panGesture(_ recognizer: UIPanGestureRecognizer) { + guard let status = self.status, status.duration > 0.0 else { + return + } + + switch recognizer.state { + case .began: + self.scrubbing = true + self.scrubbingLocation = recognizer.location(in: self).x + self.initialScrubbingPosition = CGFloat(status.timestamp / status.duration) + self.scrubbingPosition = 0.0 + case .changed: + let distance = recognizer.location(in: self).x - self.scrubbingLocation + self.scrubbingPosition = self.initialScrubbingPosition + (distance / self.bounds.size.width) + self.layoutSubviews() + case .ended: + self.scrubbing = false + self.seek(Double(self.scrubbingPosition) * status.duration) + default: + break + } + } + + override func layoutSubviews() { + super.layoutSubviews() + + let size = self.bounds.size + let barHeight: CGFloat = 2.0 + let handleHeight: CGFloat = 14.0 + + self.backgroundView.frame = CGRect(origin: 
CGPoint(x: 0.0, y: floor(size.height - barHeight) / 2.0), size: CGSize(width: size.width, height: barHeight)) + + var position: CGFloat = 0.0 + if self.scrubbing { + position = self.scrubbingPosition + } else { + if let status = self.status, status.duration > 0.0 { + position = CGFloat(status.timestamp / status.duration) + } + } + + self.foregroundView.frame = CGRect(origin: CGPoint(x: -size.width + floor(position * size.width), y: 0.0), size: CGSize(width: size.width, height: barHeight)) + self.handleView.frame = CGRect(origin: CGPoint(x: floor(position * size.width), y: floor(size.height - handleHeight) / 2.0), size: CGSize(width: 1.5, height: handleHeight)) + } +} diff --git a/TelegramUI/Config/TelegramUI.xcconfig b/TelegramUI/Config/TelegramUI.xcconfig new file mode 100644 index 0000000000..f54392eba9 --- /dev/null +++ b/TelegramUI/Config/TelegramUI.xcconfig @@ -0,0 +1,2 @@ +SWIFT_INCLUDE_PATHS = $(SRCROOT)/TelegramUI +MODULEMAP_PRIVATE_FILE = $(SRCROOT)/TelegramUI/module.private.modulemap diff --git a/TelegramUI/ContactsController.swift b/TelegramUI/ContactsController.swift new file mode 100644 index 0000000000..91abf60574 --- /dev/null +++ b/TelegramUI/ContactsController.swift @@ -0,0 +1,255 @@ +import Foundation +import Display +import AsyncDisplayKit +import Postbox +import SwiftSignalKit +import TelegramCore + +private enum ContactsControllerEntryId: Hashable { + case search + case vcard + case peerId(Int64) + + var hashValue: Int { + switch self { + case .search: + return 0 + case .vcard: + return 1 + case let .peerId(peerId): + return peerId.hashValue + } + } +} + +private func <(lhs: ContactsControllerEntryId, rhs: ContactsControllerEntryId) -> Bool { + return lhs.hashValue < rhs.hashValue +} + +private func ==(lhs: ContactsControllerEntryId, rhs: ContactsControllerEntryId) -> Bool { + switch lhs { + case .search: + switch rhs { + case .search: + return true + default: + return false + } + case .vcard: + switch rhs { + case .vcard: + return true + 
default: + return false + } + case let .peerId(lhsId): + switch rhs { + case let .peerId(rhsId): + return lhsId == rhsId + default: + return false + } + } +} + +private enum ContactsEntry: Comparable, Identifiable { + case search + case vcard(Peer) + case peer(Peer) + + var stableId: ContactsControllerEntryId { + switch self { + case .search: + return .search + case .vcard: + return .vcard + case let .peer(peer): + return .peerId(peer.id.toInt64()) + } + } +} + +private func ==(lhs: ContactsEntry, rhs: ContactsEntry) -> Bool { + switch lhs { + case .search: + switch rhs { + case .search: + return true + default: + return false + } + case let .vcard(lhsPeer): + switch rhs { + case let .vcard(rhsPeer): + return lhsPeer.id == rhsPeer.id + default: + return false + } + case let .peer(lhsPeer): + switch rhs { + case let .peer(rhsPeer): + return lhsPeer.id == rhsPeer.id + default: + return false + } + } +} + +private func <(lhs: ContactsEntry, rhs: ContactsEntry) -> Bool { + return lhs.stableId < rhs.stableId +} + +private func entriesForView(_ view: ContactPeersView) -> [ContactsEntry] { + var entries: [ContactsEntry] = [] + entries.append(.search) + if let peer = view.accountPeer { + entries.append(.vcard(peer)) + } + for peer in view.peers { + entries.append(.peer(peer)) + } + return entries +} + +public class ContactsController: ViewController { + private let queue = Queue() + + private let account: Account + private let disposable = MetaDisposable() + + private var entries: [ContactsEntry] = [] + + private var contactsNode: ContactsControllerNode { + return self.displayNode as! 
ContactsControllerNode + } + + private let index: PeerNameIndex = .lastNameFirst + + public init(account: Account) { + self.account = account + + super.init() + + self.title = "Contacts" + self.tabBarItem.title = "Contacts" + self.tabBarItem.image = UIImage(bundleImageName: "Chat List/Tabs/IconContacts") + self.tabBarItem.selectedImage = UIImage(bundleImageName: "Chat List/Tabs/IconContactsSelected") + + self.disposable.set((account.postbox.contactPeersView(index: self.index, accountPeerId: account.peerId) |> deliverOn(self.queue)).start(next: { [weak self] view in + self?.updateView(view) + })) + + self.scrollToTop = { [weak self] in + if let strongSelf = self, !strongSelf.entries.isEmpty { + strongSelf.contactsNode.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous, .LowLatency], scrollToItem: ListViewScrollToItem(index: 0, position: .Top, animated: true, curve: .Default, directionHint: .Up), updateSizeAndInsets: nil, stationaryItemRange: nil, completion: { _ in }) + } + } + } + + required public init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.disposable.dispose() + } + + override public func loadDisplayNode() { + self.displayNode = ContactsControllerNode(account: self.account) + + self.contactsNode.navigationBar = self.navigationBar + + self.contactsNode.requestDeactivateSearch = { [weak self] in + self?.deactivateSearch() + } + + self.contactsNode.requestOpenPeerFromSearch = { [weak self] peerId in + if let strongSelf = self { + (strongSelf.navigationController as? 
NavigationController)?.pushViewController(ChatController(account: strongSelf.account, peerId: peerId)) + } + } + + self.displayNodeDidLoad() + } + + override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.contactsNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } + + private func updateView(_ view: ContactPeersView) { + assert(self.queue.isCurrent()) + + let previousEntries = self.entries + let updatedEntries = entriesForView(view) + + let (deleteIndices, indicesAndItems) = mergeListsStable(leftList: previousEntries, rightList: updatedEntries) + + self.entries = updatedEntries + + var adjustedDeleteIndices: [ListViewDeleteItem] = [] + if deleteIndices.count != 0 { + for index in deleteIndices { + adjustedDeleteIndices.append(ListViewDeleteItem(index: index, directionHint: nil)) + } + } + + var adjustedIndicesAndItems: [ListViewInsertItem] = [] + for (index, entry, previousIndex) in indicesAndItems { + switch entry { + case .search: + adjustedIndicesAndItems.append(ListViewInsertItem(index: index, previousIndex: previousIndex, item: ChatListSearchItem(placeholder: "Search contacts", activate: { [weak self] in + self?.activateSearch() + }), directionHint: nil)) + case let .vcard(peer): + adjustedIndicesAndItems.append(ListViewInsertItem(index: index, previousIndex: previousIndex, item: ContactsVCardItem(account: self.account, peer: peer, action: { [weak self] _ in + if let strongSelf = self { + strongSelf.entrySelected(entry) + strongSelf.contactsNode.listView.clearHighlightAnimated(true) + } + }), directionHint: nil)) + case let .peer(peer): + adjustedIndicesAndItems.append(ListViewInsertItem(index: index, previousIndex: previousIndex, item: ContactsPeerItem(account: self.account, peer: peer, index: self.index, action: { [weak self] _ in + if let strongSelf = self { + 
strongSelf.entrySelected(entry) + strongSelf.contactsNode.listView.clearHighlightAnimated(true) + } + }), directionHint: nil)) + } + } + + DispatchQueue.main.async { + let options: ListViewDeleteAndInsertOptions = [] + + self.contactsNode.listView.deleteAndInsertItems(deleteIndices: adjustedDeleteIndices, insertIndicesAndItems: adjustedIndicesAndItems, updateIndicesAndItems: [], options: options, scrollToItem: nil, completion: { _ in + }) + } + } + + private func entrySelected(_ entry: ContactsEntry) { + if case let .peer(peer) = entry { + (self.navigationController as? NavigationController)?.pushViewController(ChatController(account: self.account, peerId: peer.id)) + } + if case let .vcard(peer) = entry { + (self.navigationController as? NavigationController)?.pushViewController(ChatController(account: self.account, peerId: peer.id)) + } + } + + private func activateSearch() { + if self.displayNavigationBar { + if let scrollToTop = self.scrollToTop { + scrollToTop() + } + self.contactsNode.activateSearch() + self.setDisplayNavigationBar(false, transition: .animated(duration: 0.5, curve: .spring)) + } + } + + private func deactivateSearch() { + if !self.displayNavigationBar { + self.contactsNode.deactivateSearch() + self.setDisplayNavigationBar(true, transition: .animated(duration: 0.5, curve: .spring)) + } + } +} diff --git a/TelegramUI/ContactsControllerNode.swift b/TelegramUI/ContactsControllerNode.swift new file mode 100644 index 0000000000..a376bf7f83 --- /dev/null +++ b/TelegramUI/ContactsControllerNode.swift @@ -0,0 +1,120 @@ +import Display +import AsyncDisplayKit +import UIKit +import Postbox +import TelegramCore + +final class ContactsControllerNode: ASDisplayNode { + let listView: ListView + + private let account: Account + private var searchDisplayController: SearchDisplayController? + + private var containerLayout: (ContainerViewLayout, CGFloat)? + + var navigationBar: NavigationBar? + + var requestDeactivateSearch: (() -> Void)? 
+ var requestOpenPeerFromSearch: ((PeerId) -> Void)? + + init(account: Account) { + self.account = account + self.listView = ListView() + + super.init(viewBlock: { + return UITracingLayerView() + }, didLoad: nil) + + self.addSubnode(self.listView) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.containerLayout = (layout, navigationBarHeight) + + var insets = layout.insets(options: [.input]) + insets.top += navigationBarHeight + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: layout.size.width, height: layout.size.height) + self.listView.position = CGPoint(x: layout.size.width / 2.0, y: layout.size.height / 2.0) + + var duration: Double = 0.0 + var curve: UInt = 0 + switch transition { + case .immediate: + break + case let .animated(animationDuration, animationCurve): + duration = animationDuration + switch animationCurve { + case .easeInOut: + break + case .spring: + curve = 7 + } + } + + let listViewCurve: ListViewAnimationCurve + var speedFactor: CGFloat = 1.0 + if curve == 7 { + speedFactor = CGFloat(duration) / 0.5 + listViewCurve = .Spring(speed: CGFloat(speedFactor)) + } else { + listViewCurve = .Default + } + + let updateSizeAndInsets = ListViewUpdateSizeAndInsets(size: layout.size, insets: insets, duration: duration, curve: listViewCurve) + + self.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous, .LowLatency], scrollToItem: nil, updateSizeAndInsets: updateSizeAndInsets, stationaryItemRange: nil, completion: { _ in }) + + if let searchDisplayController = self.searchDisplayController { + searchDisplayController.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + } + } + + func activateSearch() { + guard let (containerLayout, navigationBarHeight) = self.containerLayout, let navigationBar = self.navigationBar else { + return + } + + 
var maybePlaceholderNode: SearchBarPlaceholderNode? + self.listView.forEachItemNode { node in + if let node = node as? ChatListSearchItemNode { + maybePlaceholderNode = node.searchBarNode + } + } + + if let _ = self.searchDisplayController { + return + } + + if let placeholderNode = maybePlaceholderNode { + self.searchDisplayController = SearchDisplayController(contentNode: ContactsSearchContainerNode(account: self.account, openPeer: { [weak self] peerId in + if let requestOpenPeerFromSearch = self?.requestOpenPeerFromSearch { + requestOpenPeerFromSearch(peerId) + } + }), cancel: { [weak self] in + if let requestDeactivateSearch = self?.requestDeactivateSearch { + requestDeactivateSearch() + } + }) + + self.searchDisplayController?.containerLayoutUpdated(containerLayout, navigationBarHeight: navigationBarHeight, transition: .immediate) + self.searchDisplayController?.activate(insertSubnode: { subnode in + self.insertSubnode(subnode, belowSubnode: navigationBar) + }, placeholder: placeholderNode) + } + } + + func deactivateSearch() { + if let searchDisplayController = self.searchDisplayController { + var maybePlaceholderNode: SearchBarPlaceholderNode? + self.listView.forEachItemNode { node in + if let node = node as? 
ChatListSearchItemNode { + maybePlaceholderNode = node.searchBarNode + } + } + + searchDisplayController.deactivate(placeholder: maybePlaceholderNode) + self.searchDisplayController = nil + } + } +} diff --git a/TelegramUI/ContactsPeerItem.swift b/TelegramUI/ContactsPeerItem.swift new file mode 100644 index 0000000000..4112b8daf1 --- /dev/null +++ b/TelegramUI/ContactsPeerItem.swift @@ -0,0 +1,266 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Postbox +import Display +import SwiftSignalKit +import TelegramCore + +private let titleFont = Font.regular(17.0) +private let titleBoldFont = Font.medium(17.0) +private let statusFont = Font.regular(13.0) + +class ContactsPeerItem: ListViewItem { + let account: Account + let peer: Peer + let action: (Peer) -> Void + let selectable: Bool = true + + let headerAccessoryItem: ListViewAccessoryItem? + + init(account: Account, peer: Peer, index: PeerNameIndex?, action: @escaping (Peer) -> Void) { + self.account = account + self.peer = peer + self.action = action + + if let index = index { + var letter: String = "#" + if let user = peer as? TelegramUser { + switch index { + case .firstNameFirst: + if let firstName = user.firstName, !firstName.isEmpty { + letter = firstName.substring(to: firstName.index(after: firstName.startIndex)).uppercased() + } else if let lastName = user.lastName, !lastName.isEmpty { + letter = lastName.substring(to: lastName.index(after: lastName.startIndex)).uppercased() + } + case .lastNameFirst: + if let lastName = user.lastName, !lastName.isEmpty { + letter = lastName.substring(to: lastName.index(after: lastName.startIndex)).uppercased() + } else if let firstName = user.firstName, !firstName.isEmpty { + letter = firstName.substring(to: firstName.index(after: firstName.startIndex)).uppercased() + } + } + } else if let group = peer as? 
TelegramGroup { + if !group.title.isEmpty { + letter = group.title.substring(to: group.title.index(after: group.title.startIndex)).uppercased() + } + } + self.headerAccessoryItem = ContactsSectionHeaderAccessoryItem(sectionHeader: .letter(letter)) + } else { + self.headerAccessoryItem = nil + } + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ContactsPeerItemNode() + let makeLayout = node.asyncLayout() + var first = false + var last = false + if let headerAccessoryItem = self.headerAccessoryItem { + first = true + if let previousItem = previousItem, let previousHeaderItem = previousItem.headerAccessoryItem, previousHeaderItem.isEqualToItem(headerAccessoryItem) { + first = false + } + + last = true + if let nextItem = nextItem, let nextHeaderItem = nextItem.headerAccessoryItem, nextHeaderItem.isEqualToItem(headerAccessoryItem) { + last = false + } + } + let (nodeLayout, nodeApply) = makeLayout(self.account, self.peer, width, first, last) + node.contentSize = nodeLayout.contentSize + node.insets = nodeLayout.insets + + completion(node, { + nodeApply() + }) + } + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? 
ContactsPeerItemNode { + Queue.mainQueue().async { + let layout = node.asyncLayout() + async { + var first = false + var last = false + if let headerAccessoryItem = self.headerAccessoryItem { + first = true + if let previousItem = previousItem, let previousHeaderItem = previousItem.headerAccessoryItem, previousHeaderItem.isEqualToItem(headerAccessoryItem) { + first = false + } + + last = true + if let nextItem = nextItem, let nextHeaderItem = nextItem.headerAccessoryItem, nextHeaderItem.isEqualToItem(headerAccessoryItem) { + last = false + } + } + + let (nodeLayout, apply) = layout(self.account, self.peer, width, first, last) + Queue.mainQueue().async { + completion(nodeLayout, { + apply() + }) + } + } + } + } + } + + func selected() { + self.action(self.peer) + } +} + +private let separatorHeight = 1.0 / UIScreen.main.scale + +class ContactsPeerItemNode: ListViewItemNode { + private let separatorNode: ASDisplayNode + private let highlightedBackgroundNode: ASDisplayNode + + private let avatarNode: ChatListAvatarNode + private let titleNode: TextNode + private let statusNode: TextNode + + private var account: Account? + private var peer: Peer? + private var avatarState: (Account, Peer)? 
+ + required init() { + self.separatorNode = ASDisplayNode() + self.separatorNode.backgroundColor = UIColor(0xc8c7cc) + self.separatorNode.isLayerBacked = true + + self.highlightedBackgroundNode = ASDisplayNode() + self.highlightedBackgroundNode.backgroundColor = UIColor(0xd9d9d9) + self.highlightedBackgroundNode.isLayerBacked = true + + self.avatarNode = ChatListAvatarNode(font: Font.regular(15.0)) + self.avatarNode.isLayerBacked = true + + self.titleNode = TextNode() + self.statusNode = TextNode() + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.separatorNode) + self.addSubnode(self.avatarNode) + self.addSubnode(self.titleNode) + self.addSubnode(self.statusNode) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + let makeLayout = self.asyncLayout() + let (nodeLayout, nodeApply) = makeLayout(self.account, self.peer, width, previousItem != nil, nextItem != nil) + self.contentSize = nodeLayout.contentSize + self.insets = nodeLayout.insets + nodeApply() + } + + override func setHighlighted(_ highlighted: Bool, animated: Bool) { + super.setHighlighted(highlighted, animated: animated) + + if highlighted { + /*self.contentNode.displaysAsynchronously = false + self.contentNode.backgroundColor = UIColor.clear + self.contentNode.isOpaque = false*/ + + self.highlightedBackgroundNode.alpha = 1.0 + if self.highlightedBackgroundNode.supernode == nil { + self.insertSubnode(self.highlightedBackgroundNode, aboveSubnode: self.separatorNode) + } + } else { + if self.highlightedBackgroundNode.supernode != nil { + if animated { + self.highlightedBackgroundNode.layer.animateAlpha(from: self.highlightedBackgroundNode.alpha, to: 0.0, duration: 0.4, completion: { [weak self] completed in + if let strongSelf = self { + if completed { + strongSelf.highlightedBackgroundNode.removeFromSupernode() + /*strongSelf.contentNode.backgroundColor = UIColor.white + 
strongSelf.contentNode.isOpaque = true + strongSelf.contentNode.displaysAsynchronously = true*/ + } + } + }) + self.highlightedBackgroundNode.alpha = 0.0 + } else { + self.highlightedBackgroundNode.removeFromSupernode() + /*self.contentNode.backgroundColor = UIColor.white + self.contentNode.isOpaque = true + self.contentNode.displaysAsynchronously = true*/ + } + } + } + } + + func asyncLayout() -> (_ account: Account?, _ peer: Peer?, _ width: CGFloat, _ first: Bool, _ last: Bool) -> (ListViewItemNodeLayout, () -> Void) { + let makeTitleLayout = TextNode.asyncLayout(self.titleNode) + let makeStatusLayout = TextNode.asyncLayout(self.statusNode) + + return { [weak self] account, peer, width, first, last in + let leftInset: CGFloat = 65.0 + let rightInset: CGFloat = 10.0 + + var titleAttributedString: NSAttributedString? + var statusAttributedString: NSAttributedString? + + if let peer = peer { + if let user = peer as? TelegramUser { + if let firstName = user.firstName, let lastName = user.lastName, !firstName.isEmpty, !lastName.isEmpty { + let string = NSMutableAttributedString() + string.append(NSAttributedString(string: firstName, font: titleFont, textColor: .black)) + string.append(NSAttributedString(string: " ", font: titleFont, textColor: .black)) + string.append(NSAttributedString(string: lastName, font: titleBoldFont, textColor: .black)) + titleAttributedString = string + } else if let firstName = user.firstName, !firstName.isEmpty { + titleAttributedString = NSAttributedString(string: firstName, font: titleBoldFont, textColor: UIColor.black) + } else if let lastName = user.lastName, !lastName.isEmpty { + titleAttributedString = NSAttributedString(string: lastName, font: titleBoldFont, textColor: UIColor.black) + } else { + titleAttributedString = NSAttributedString(string: "Deleted User", font: titleBoldFont, textColor: UIColor(0xa6a6a6)) + } + + statusAttributedString = NSAttributedString(string: "last seen recently", font: statusFont, textColor: 
UIColor(0xa6a6a6)) + } else if let group = peer as? TelegramGroup { + titleAttributedString = NSAttributedString(string: group.title, font: titleBoldFont, textColor: UIColor.black) + } + } + + let (titleLayout, titleApply) = makeTitleLayout(titleAttributedString, nil, 1, .end, CGSize(width: max(0.0, width - leftInset - rightInset), height: CGFloat.infinity), nil) + + let (statusLayout, statusApply) = makeStatusLayout(statusAttributedString, nil, 1, .end, CGSize(width: max(0.0, width - leftInset - rightInset), height: CGFloat.infinity), nil) + + let nodeLayout = ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 48.0), insets: UIEdgeInsets(top: first ? 29.0 : 0.0, left: 0.0, bottom: 0.0, right: 0.0)) + + return (nodeLayout, { [weak self] in + if let strongSelf = self { + strongSelf.peer = peer + strongSelf.account = account + + if let peer = peer, let account = account, strongSelf.avatarState == nil || strongSelf.avatarState!.0 !== account || !strongSelf.avatarState!.1.isEqual(peer) { + strongSelf.avatarNode.setPeer(account: account, peer: peer) + } + + strongSelf.avatarNode.frame = CGRect(origin: CGPoint(x: 14.0, y: 4.0), size: CGSize(width: 40.0, height: 40.0)) + + let _ = titleApply() + strongSelf.titleNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 4.0), size: titleLayout.size) + + let _ = statusApply() + strongSelf.statusNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 25.0), size: statusLayout.size) + + let topHighlightInset: CGFloat = first ? 
0.0 : separatorHeight + strongSelf.highlightedBackgroundNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -nodeLayout.insets.top - topHighlightInset), size: CGSize(width: nodeLayout.size.width, height: nodeLayout.size.height + topHighlightInset)) + strongSelf.separatorNode.frame = CGRect(origin: CGPoint(x: 65.0, y: nodeLayout.contentSize.height - separatorHeight), size: CGSize(width: max(0.0, nodeLayout.size.width - 65.0), height: separatorHeight)) + strongSelf.separatorNode.isHidden = last + } + }) + } + } + + override func layoutHeaderAccessoryItemNode(_ accessoryItemNode: ListViewAccessoryItemNode) { + let bounds = self.bounds + accessoryItemNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -29.0), size: CGSize(width: bounds.size.width, height: 29.0)) + } +} diff --git a/TelegramUI/ContactsSearchContainerNode.swift b/TelegramUI/ContactsSearchContainerNode.swift new file mode 100644 index 0000000000..79b54a721b --- /dev/null +++ b/TelegramUI/ContactsSearchContainerNode.swift @@ -0,0 +1,92 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit +import Postbox +import TelegramCore + +private enum ContactListSearchEntry { + case peer(Peer) +} + +final class ContactsSearchContainerNode: SearchDisplayControllerContentNode { + private let account: Account + private let openPeer: (PeerId) -> Void + + private let listNode: ListView + + private let searchQuery = Promise() + private let searchDisposable = MetaDisposable() + + init(account: Account, openPeer: @escaping (PeerId) -> Void) { + self.account = account + self.openPeer = openPeer + + self.listNode = ListView() + + super.init() + + self.backgroundColor = UIColor.white + self.addSubnode(self.listNode) + + self.listNode.isHidden = true + + let searchItems = searchQuery.get() + |> mapToSignal { query -> Signal<[ContactListSearchEntry], NoError> in + if let query = query, !query.isEmpty { + return account.postbox.searchContacts(query: query.lowercased()) + |> delay(0.1, queue: 
Queue.concurrentDefaultQueue()) + |> map { peers -> [ContactListSearchEntry] in + return peers.map({ .peer($0) }) + } + } else { + return .single([]) + } + } + + let previousSearchItems = Atomic<[ContactListSearchEntry]>(value: []) + + self.searchDisposable.set((searchItems + |> deliverOnMainQueue).start(next: { [weak self] items in + if let strongSelf = self { + let previousItems = previousSearchItems.swap(items) + + var listItems: [ListViewItem] = [] + for item in items { + switch item { + case let .peer(peer): + listItems.append(ContactsPeerItem(account: account, peer: peer, index: nil, action: { [weak self] peer in + if let openPeer = self?.openPeer { + self?.listNode.clearHighlightAnimated(true) + openPeer(peer.id) + } + })) + } + } + + strongSelf.listNode.deleteAndInsertItems(deleteIndices: (0 ..< previousItems.count).map({ ListViewDeleteItem(index: $0, directionHint: nil) }), insertIndicesAndItems: (0 ..< listItems.count).map({ ListViewInsertItem(index: $0, previousIndex: nil, item: listItems[$0], directionHint: .Down) }), updateIndicesAndItems: [], options: []) + } + })) + } + + deinit { + self.searchDisposable.dispose() + } + + override func searchTextUpdated(text: String) { + if text.isEmpty { + self.searchQuery.set(.single(nil)) + self.listNode.isHidden = true + } else { + self.searchQuery.set(.single(text)) + self.listNode.isHidden = false + } + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + + self.listNode.frame = CGRect(origin: CGPoint(), size: layout.size) + self.listNode.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: [], updateIndicesAndItems: [], options: [.Synchronous], scrollToItem: nil, updateSizeAndInsets: ListViewUpdateSizeAndInsets(size: layout.size, insets: UIEdgeInsets(top: navigationBarHeight, left: 0.0, bottom: 0.0, 
right: 0.0), duration: 0.0, curve: .Default), stationaryItemRange: nil, completion: { _ in }) + } +} diff --git a/TelegramUI/ContactsSectionHeaderAccessoryItem.swift b/TelegramUI/ContactsSectionHeaderAccessoryItem.swift new file mode 100644 index 0000000000..f70aad2829 --- /dev/null +++ b/TelegramUI/ContactsSectionHeaderAccessoryItem.swift @@ -0,0 +1,70 @@ +import Foundation +import AsyncDisplayKit +import Display + +enum ContactsSectionHeader: Equatable { + case letter(String) + case title(String) +} + +func ==(lhs: ContactsSectionHeader, rhs: ContactsSectionHeader) -> Bool { + switch lhs { + case let .letter(letter): + if case .letter(letter) = rhs { + return true + } else { + return false + } + case let .title(title): + if case .title(title) = rhs { + return true + } else { + return false + } + } +} + +final class ContactsSectionHeaderAccessoryItem: ListViewAccessoryItem { + private let sectionHeader: ContactsSectionHeader + + init(sectionHeader: ContactsSectionHeader) { + self.sectionHeader = sectionHeader + } + + func isEqualToItem(_ other: ListViewAccessoryItem) -> Bool { + if let other = other as? 
ContactsSectionHeaderAccessoryItem, self.sectionHeader == other.sectionHeader { + return true + } else { + return false + } + } + + func node() -> ListViewAccessoryItemNode { + return ContactsSectionHeaderAccessoryItemNode(sectionHeader: self.sectionHeader) + } +} + +private final class ContactsSectionHeaderAccessoryItemNode: ListViewAccessoryItemNode { + private let sectionHeader: ContactsSectionHeader + private let sectionHeaderNode: ListSectionHeaderNode + + init(sectionHeader: ContactsSectionHeader) { + self.sectionHeader = sectionHeader + self.sectionHeaderNode = ListSectionHeaderNode() + + super.init() + + switch sectionHeader { + case let .letter(letter): + self.sectionHeaderNode.title = letter + case let .title(title): + self.sectionHeaderNode.title = title + } + + self.addSubnode(self.sectionHeaderNode) + } + + override func layout() { + self.sectionHeaderNode.frame = CGRect(origin: CGPoint(), size: self.bounds.size) + } +} diff --git a/TelegramUI/ContactsVCardItem.swift b/TelegramUI/ContactsVCardItem.swift new file mode 100644 index 0000000000..6810c46120 --- /dev/null +++ b/TelegramUI/ContactsVCardItem.swift @@ -0,0 +1,201 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import Postbox +import Display +import SwiftSignalKit +import TelegramCore + +private let titleFont = Font.regular(20.0) +private let statusFont = Font.regular(14.0) + +class ContactsVCardItem: ListViewItem { + let account: Account + let peer: Peer + let action: (Peer) -> Void + let selectable: Bool = true + + init(account: Account, peer: Peer, action: @escaping (Peer) -> Void) { + self.account = account + self.peer = peer + self.action = action + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ContactsVCardItemNode() + let makeLayout = node.asyncLayout() + let (nodeLayout, 
nodeApply) = makeLayout(self.account, self.peer, width, previousItem != nil, nextItem != nil) + node.contentSize = nodeLayout.contentSize + node.insets = nodeLayout.insets + + completion(node, { + nodeApply() + }) + } + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? ContactsVCardItemNode { + Queue.mainQueue().async { + let layout = node.asyncLayout() + async { + let first = previousItem == nil + let last = nextItem == nil + + let (nodeLayout, apply) = layout(self.account, self.peer, width, first, last) + Queue.mainQueue().async { + completion(nodeLayout, { + apply() + }) + } + } + } + } + } + + func selected() { + self.action(self.peer) + } +} + +private let separatorHeight = 1.0 / UIScreen.main.scale + +class ContactsVCardItemNode: ListViewItemNode { + private let separatorNode: ASDisplayNode + private let highlightedBackgroundNode: ASDisplayNode + + private let avatarNode: ChatListAvatarNode + private let titleNode: TextNode + private let statusNode: TextNode + + private var account: Account? + private var peer: Peer? + private var avatarState: (Account, Peer)? 
+ + required init() { + self.separatorNode = ASDisplayNode() + self.separatorNode.backgroundColor = UIColor(0xc8c7cc) + self.separatorNode.isLayerBacked = true + + self.highlightedBackgroundNode = ASDisplayNode() + self.highlightedBackgroundNode.backgroundColor = UIColor(0xd9d9d9) + self.highlightedBackgroundNode.isLayerBacked = true + + self.avatarNode = ChatListAvatarNode(font: Font.regular(15.0)) + self.avatarNode.isLayerBacked = true + + self.titleNode = TextNode() + self.statusNode = TextNode() + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.separatorNode) + self.addSubnode(self.avatarNode) + self.addSubnode(self.titleNode) + self.addSubnode(self.statusNode) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + let makeLayout = self.asyncLayout() + let (nodeLayout, nodeApply) = makeLayout(self.account, self.peer, width, previousItem != nil, nextItem != nil) + self.contentSize = nodeLayout.contentSize + self.insets = nodeLayout.insets + nodeApply() + } + + private func updateBackgroundAndSeparatorsLayout(layout: ListViewItemNodeLayout) { + self.highlightedBackgroundNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -layout.insets.top - separatorHeight), size: CGSize(width: layout.size.width, height: layout.size.height + separatorHeight)) + self.separatorNode.frame = CGRect(origin: CGPoint(x: 65.0, y: layout.size.height - separatorHeight), size: CGSize(width: max(0.0, layout.size.width - 65.0), height: separatorHeight)) + } + + override func setHighlighted(_ highlighted: Bool, animated: Bool) { + super.setHighlighted(highlighted, animated: animated) + + if highlighted { + /*self.contentNode.displaysAsynchronously = false + self.contentNode.backgroundColor = UIColor.clear + self.contentNode.isOpaque = false*/ + + self.highlightedBackgroundNode.alpha = 1.0 + if self.highlightedBackgroundNode.supernode == nil { + 
self.insertSubnode(self.highlightedBackgroundNode, aboveSubnode: self.separatorNode) + } + } else { + if self.highlightedBackgroundNode.supernode != nil { + if animated { + self.highlightedBackgroundNode.layer.animateAlpha(from: self.highlightedBackgroundNode.alpha, to: 0.0, duration: 0.4, completion: { [weak self] completed in + if let strongSelf = self { + if completed { + strongSelf.highlightedBackgroundNode.removeFromSupernode() + /*strongSelf.contentNode.backgroundColor = UIColor.white + strongSelf.contentNode.isOpaque = true + strongSelf.contentNode.displaysAsynchronously = true*/ + } + } + }) + self.highlightedBackgroundNode.alpha = 0.0 + } else { + self.highlightedBackgroundNode.removeFromSupernode() + /*self.contentNode.backgroundColor = UIColor.white + self.contentNode.isOpaque = true + self.contentNode.displaysAsynchronously = true*/ + } + } + } + } + + func asyncLayout() -> (_ account: Account?, _ peer: Peer?, _ width: CGFloat, _ first: Bool, _ last: Bool) -> (ListViewItemNodeLayout, () -> Void) { + let makeTitleLayout = TextNode.asyncLayout(self.titleNode) + let makeStatusLayout = TextNode.asyncLayout(self.statusNode) + + return { [weak self] account, peer, width, first, last in + let leftInset: CGFloat = 91.0 + let rightInset: CGFloat = 10.0 + + var titleAttributedString: NSAttributedString? + var statusAttributedString: NSAttributedString? + + if let peer = peer { + if let user = peer as? TelegramUser { + titleAttributedString = NSAttributedString(string: user.displayTitle, font: titleFont, textColor: UIColor.black) + + if let phone = user.phone { + statusAttributedString = NSAttributedString(string: formatPhoneNumber(phone), font: statusFont, textColor: UIColor(0xa6a6a6)) + } + } else if let group = peer as? 
TelegramGroup { + titleAttributedString = NSAttributedString(string: group.title, font: titleFont, textColor: UIColor.black) + statusAttributedString = NSAttributedString(string: "group", font: statusFont, textColor: UIColor(0xa6a6a6)) + } + } + + let (titleLayout, titleApply) = makeTitleLayout(titleAttributedString, nil, 1, .end, CGSize(width: max(0.0, width - leftInset - rightInset), height: CGFloat.infinity), nil) + + let (statusLayout, statusApply) = makeStatusLayout(statusAttributedString, nil, 1, .end, CGSize(width: max(0.0, width - leftInset - rightInset), height: CGFloat.infinity), nil) + + let nodeLayout = ListViewItemNodeLayout(contentSize: CGSize(width: width, height: 78.0), insets: UIEdgeInsets()) + + return (nodeLayout, { [weak self] in + if let strongSelf = self { + strongSelf.peer = peer + strongSelf.account = account + + if let peer = peer, let account = account, strongSelf.avatarState == nil || strongSelf.avatarState!.0 !== account || !strongSelf.avatarState!.1.isEqual(peer) { + strongSelf.avatarNode.setPeer(account: account, peer: peer) + } + + strongSelf.avatarNode.frame = CGRect(origin: CGPoint(x: 14.0, y: 6.0), size: CGSize(width: 60.0, height: 60.0)) + + let _ = titleApply() + strongSelf.titleNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 15.0), size: titleLayout.size) + + let _ = statusApply() + strongSelf.statusNode.frame = CGRect(origin: CGPoint(x: leftInset, y: 40.0), size: statusLayout.size) + + strongSelf.updateBackgroundAndSeparatorsLayout(layout: nodeLayout) + strongSelf.separatorNode.isHidden = true + } + }) + } + } +} diff --git a/TelegramUI/FFMpegAudioFrameDecoder.swift b/TelegramUI/FFMpegAudioFrameDecoder.swift new file mode 100644 index 0000000000..63c677e687 --- /dev/null +++ b/TelegramUI/FFMpegAudioFrameDecoder.swift @@ -0,0 +1,69 @@ +import Foundation +import TelegramUIPrivateModule +import CoreMedia + +final class FFMpegAudioFrameDecoder: MediaTrackFrameDecoder { + private let codecContext: UnsafeMutablePointer + private 
let swrContext: FFMpegSwResample + + private let audioFrame: UnsafeMutablePointer + private var resetDecoderOnNextFrame = true + + init(codecContext: UnsafeMutablePointer) { + self.codecContext = codecContext + self.audioFrame = av_frame_alloc() + + self.swrContext = FFMpegSwResample(sourceChannelCount: Int(codecContext.pointee.channels), sourceSampleRate: Int(codecContext.pointee.sample_rate), sourceSampleFormat: codecContext.pointee.sample_fmt, destinationChannelCount: 2, destinationSampleRate: 44100, destinationSampleFormat: AV_SAMPLE_FMT_S16) + } + + deinit { + av_frame_unref(self.audioFrame) + + var codecContextRef: UnsafeMutablePointer? = codecContext + avcodec_free_context(&codecContextRef) + } + + func decode(frame: MediaTrackDecodableFrame) -> MediaTrackFrame? { + var status = avcodec_send_packet(self.codecContext, frame.packet) + if status == 0 { + status = avcodec_receive_frame(self.codecContext, self.audioFrame) + if status == 0 { + return convertAudioFrame(self.audioFrame, pts: frame.pts, duration: frame.duration) + } + } + + return nil + } + + private func convertAudioFrame(_ frame: UnsafeMutablePointer, pts: CMTime, duration: CMTime) -> MediaTrackFrame? { + guard let data = self.swrContext.resample(frame) else { + return nil + } + + var blockBuffer: CMBlockBuffer? + + let bytes = malloc(data.count)! + data.copyBytes(to: bytes.assumingMemoryBound(to: UInt8.self), count: data.count) + let status = CMBlockBufferCreateWithMemoryBlock(nil, bytes, data.count, nil, nil, 0, data.count, 0, &blockBuffer) + if status != noErr { + return nil + } + + var timingInfo = CMSampleTimingInfo(duration: duration, presentationTimeStamp: pts, decodeTimeStamp: pts) + var sampleBuffer: CMSampleBuffer? 
+ var sampleSize = data.count + guard CMSampleBufferCreate(nil, blockBuffer, true, nil, nil, nil, 1, 1, &timingInfo, 1, &sampleSize, &sampleBuffer) == noErr else { + return nil + } + + let resetDecoder = self.resetDecoderOnNextFrame + self.resetDecoderOnNextFrame = false + + return MediaTrackFrame(type: .audio, sampleBuffer: sampleBuffer!, resetDecoder: resetDecoder) + } + + func reset() { + avcodec_flush_buffers(self.codecContext) + self.resetDecoderOnNextFrame = true + } +} diff --git a/TelegramUI/FFMpegMediaFrameSource.swift b/TelegramUI/FFMpegMediaFrameSource.swift new file mode 100644 index 0000000000..adcb3a1a8c --- /dev/null +++ b/TelegramUI/FFMpegMediaFrameSource.swift @@ -0,0 +1,211 @@ +import Foundation +import SwiftSignalKit +import Postbox +import TelegramCore + +private final class ThreadTaskQueue: NSObject { + private var mutex: pthread_mutex_t + private var condition: pthread_cond_t + private var tasks: [() -> Void] = [] + private var shouldExit = false + + override init() { + self.mutex = pthread_mutex_t() + self.condition = pthread_cond_t() + pthread_mutex_init(&self.mutex, nil) + pthread_cond_init(&self.condition, nil) + + super.init() + } + + deinit { + pthread_mutex_destroy(&self.mutex) + pthread_cond_destroy(&self.condition) + } + + func loop() { + while !self.shouldExit { + pthread_mutex_lock(&self.mutex) + + if tasks.isEmpty { + pthread_cond_wait(&self.condition, &self.mutex) + } + + var task: (() -> Void)? 
+ if !self.tasks.isEmpty { + task = self.tasks.removeFirst() + } + + pthread_mutex_unlock(&self.mutex) + + if let task = task { + autoreleasepool { + task() + } + } + } + } + + func enqueue(_ task: @escaping () -> Void) { + pthread_mutex_lock(&self.mutex) + self.tasks.append(task) + pthread_cond_broadcast(&self.condition) + pthread_mutex_unlock(&self.mutex) + } + + func terminate() { + pthread_mutex_lock(&self.mutex) + self.shouldExit = true + pthread_cond_broadcast(&self.condition) + pthread_mutex_unlock(&self.mutex) + } +} + +private func contextForCurrentThread() -> FFMpegMediaFrameSourceContext? { + return Thread.current.threadDictionary["FFMpegMediaFrameSourceContext"] as? FFMpegMediaFrameSourceContext +} + +final class FFMpegMediaFrameSource: NSObject, MediaFrameSource { + private let queue: Queue + private let account: Account + private let resource: MediaResource + + private let taskQueue: ThreadTaskQueue + private let thread: Thread + + private let eventSinkBag = Bag<(MediaTrackEvent) -> Void>() + private var generatingFrames = false + private var requestedFrameGenerationTimestamp: Double? 
+ + @objc private static func threadEntry(_ taskQueue: ThreadTaskQueue) { + autoreleasepool { + let context = FFMpegMediaFrameSourceContext(thread: Thread.current) + let localStorage = Thread.current.threadDictionary + localStorage["FFMpegMediaFrameSourceContext"] = context + + taskQueue.loop() + } + } + + init(queue: Queue, account: Account, resource: MediaResource) { + self.queue = queue + self.account = account + self.resource = resource + + self.taskQueue = ThreadTaskQueue() + + self.thread = Thread(target: FFMpegMediaFrameSource.self, selector: #selector(FFMpegMediaFrameSource.threadEntry(_:)), object: taskQueue) + self.thread.name = "FFMpegMediaFrameSourceContext" + self.thread.start() + + super.init() + } + + deinit { + assert(self.queue.isCurrent()) + + self.taskQueue.terminate() + } + + func addEventSink(_ f: @escaping (MediaTrackEvent) -> Void) -> Int { + assert(self.queue.isCurrent()) + + return self.eventSinkBag.add(f) + } + + func removeEventSink(_ index: Int) { + assert(self.queue.isCurrent()) + + self.eventSinkBag.remove(index) + } + + func generateFrames(until timestamp: Double) { + assert(self.queue.isCurrent()) + + if self.requestedFrameGenerationTimestamp == nil || !self.requestedFrameGenerationTimestamp!.isEqual(to: timestamp) { + self.requestedFrameGenerationTimestamp = timestamp + + self.internalGenerateFrames(until: timestamp) + } + } + + private func internalGenerateFrames(until timestamp: Double) { + if self.generatingFrames { + return + } + + self.generatingFrames = true + + let account = self.account + let resource = self.resource + let queue = self.queue + self.performWithContext { [weak self] context in + context.initializeState(account: account, resource: resource) + + let frames = context.takeFrames(until: timestamp) + + queue.async { [weak self] in + if let strongSelf = self { + strongSelf.generatingFrames = false + + for sink in strongSelf.eventSinkBag.copyItems() { + sink(.frames(frames)) + } + + if 
strongSelf.requestedFrameGenerationTimestamp != nil && !strongSelf.requestedFrameGenerationTimestamp!.isEqual(to: timestamp) { + strongSelf.internalGenerateFrames(until: strongSelf.requestedFrameGenerationTimestamp!) + } + } + } + } + } + + func performWithContext(_ f: @escaping (FFMpegMediaFrameSourceContext) -> Void) { + assert(self.queue.isCurrent()) + + taskQueue.enqueue { + if let context = contextForCurrentThread() { + f(context) + } + } + } + + func seek(timestamp: Double) -> Signal { + assert(self.queue.isCurrent()) + + return Signal { subscriber in + let disposable = MetaDisposable() + + let queue = self.queue + let account = self.account + let resource = self.resource + + self.performWithContext { [weak self] context in + context.initializeState(account: account, resource: resource) + + context.seek(timestamp: timestamp, completed: { [weak self] streamDescriptions, timestamp in + queue.async { [weak self] in + if let strongSelf = self { + var audioBuffer: MediaTrackFrameBuffer? + var videoBuffer: MediaTrackFrameBuffer? 
+ + if let audio = streamDescriptions.audio { + audioBuffer = MediaTrackFrameBuffer(frameSource: strongSelf, decoder: audio.decoder, type: .audio, duration: audio.duration) + } + + if let video = streamDescriptions.video { + videoBuffer = MediaTrackFrameBuffer(frameSource: strongSelf, decoder: video.decoder, type: .video, duration: video.duration) + } + + strongSelf.requestedFrameGenerationTimestamp = nil + subscriber.putNext(MediaFrameSourceSeekResult(buffers: MediaPlaybackBuffers(audioBuffer: audioBuffer, videoBuffer: videoBuffer), timestamp: timestamp)) + subscriber.putCompletion() + } + } + }) + } + + return disposable + } + } +} diff --git a/TelegramUI/FFMpegMediaFrameSourceContext.swift b/TelegramUI/FFMpegMediaFrameSourceContext.swift new file mode 100644 index 0000000000..3acf9a9084 --- /dev/null +++ b/TelegramUI/FFMpegMediaFrameSourceContext.swift @@ -0,0 +1,403 @@ +import Foundation +import SwiftSignalKit +import Postbox +import CoreMedia +import TelegramUIPrivateModule +import TelegramCore + +private struct StreamContext { + fileprivate let index: Int + fileprivate let codecContext: UnsafeMutablePointer? + fileprivate let fps: CMTime + fileprivate let timebase: CMTime + fileprivate let duration: CMTime + fileprivate let decoder: MediaTrackFrameDecoder + + func close() { + } +} + +struct FFMpegMediaFrameSourceDescription { + let duration: CMTime + let decoder: MediaTrackFrameDecoder +} + +struct FFMpegMediaFrameSourceDescriptionSet { + let audio: FFMpegMediaFrameSourceDescription? + let video: FFMpegMediaFrameSourceDescription? +} + +private struct InitializedState { + fileprivate let avIoContext: UnsafeMutablePointer + fileprivate let avFormatContext: UnsafeMutablePointer + + fileprivate let audioStream: StreamContext? + fileprivate let videoStream: StreamContext? 
+ + func close() { + self.videoStream?.close() + self.audioStream?.close() + } +} + +struct FFMpegMediaFrameSourceStreamContextInfo { + let duration: CMTime + let decoder: MediaTrackFrameDecoder +} + +struct FFMpegMediaFrameSourceContextInfo { + let audioStream: FFMpegMediaFrameSourceStreamContextInfo? + let videoStream: FFMpegMediaFrameSourceStreamContextInfo? +} + +/*private func getFormatCallback(codecContext: UnsafeMutablePointer?, formats: UnsafePointer?) -> AVPixelFormat { + var formats = formats! + while formats.pointee != AV_PIX_FMT_NONE { + let desc = av_pix_fmt_desc_get(formats.pointee)! + + if formats.pointee == AV_PIX_FMT_VIDEOTOOLBOX { + let result = av_videotoolbox_default_init(codecContext!) + if (result < 0) { + print("av_videotoolbox_default_init failed (\(result))") + formats = formats.successor() + continue + } + + return formats.pointee; + } else if (desc.pointee.flags & UInt64(AV_PIX_FMT_FLAG_HWACCEL)) == 0 { + return formats.pointee + } + formats = formats.successor() + } + return formats.pointee +}*/ + +private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer?, bufferSize: Int32) -> Int32 { + let context = Unmanaged.fromOpaque(userData!).takeUnretainedValue() + guard let account = context.account, let resource = context.resource else { + return 0 + } + + var fetchedCount: Int32 = 0 + + let readCount = min(resource.size - context.readingOffset, Int(bufferSize)) + let data = account.postbox.mediaBox.resourceData(resource, in: context.readingOffset ..< (context.readingOffset + readCount), mode: .complete) + var fetchedData: Data? 
+ let semaphore = DispatchSemaphore(value: 0) + let _ = data.start(next: { data in + if data.count == readCount { + fetchedData = data + semaphore.signal() + } + }) + semaphore.wait() + if let fetchedData = fetchedData { + fetchedData.withUnsafeBytes { (bytes: UnsafePointer) -> Void in + memcpy(buffer, bytes, fetchedData.count) + } + fetchedCount = Int32(fetchedData.count) + context.readingOffset += Int(fetchedCount) + } + + return fetchedCount +} + +private func seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 { + let context = Unmanaged.fromOpaque(userData!).takeUnretainedValue() + guard let account = context.account, let resource = context.resource else { + return 0 + } + + var result: Int64 = offset + + if (whence & AVSEEK_SIZE) != 0 { + result = Int64(resource.size) + } else { + context.readingOffset = Int(min(Int64(resource.size), offset)) + + if context.readingOffset != context.requestedDataOffset { + context.requestedDataOffset = context.readingOffset + + if context.readingOffset >= resource.size { + context.fetchedDataDisposable.set(nil) + } else { + context.fetchedDataDisposable.set(account.postbox.mediaBox.fetchedResourceData(resource, in: context.readingOffset ..< resource.size).start()) + } + } + } + + return result +} + +final class FFMpegMediaFrameSourceContext: NSObject { + private let thread: Thread + + var closed = false + + fileprivate var account: Account? + fileprivate var resource: MediaResource? + + private let ioBufferSize = 64 * 1024 + fileprivate var readingOffset = 0 + + fileprivate var requestedDataOffset: Int? + fileprivate let fetchedDataDisposable = MetaDisposable() + + fileprivate var readingError = false + + private var initializedState: InitializedState? 
+ private var packetQueue: [FFMpegPacket] = [] + + init(thread: Thread) { + self.thread = thread + } + + deinit { + assert(Thread.current === self.thread) + + fetchedDataDisposable.dispose() + } + + func initializeState(account: Account, resource: MediaResource) { + if self.readingError || self.initializedState != nil { + return + } + + let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals + + self.account = account + self.resource = resource + + self.fetchedDataDisposable.set(account.postbox.mediaBox.fetchedResourceData(resource, in: 0 ..< resource.size).start()) + + var avFormatContextRef = avformat_alloc_context() + guard let avFormatContext = avFormatContextRef else { + self.readingError = true + return + } + + let avIoBuffer = av_malloc(self.ioBufferSize)! + let avIoContextRef = avio_alloc_context(avIoBuffer.assumingMemoryBound(to: UInt8.self), Int32(self.ioBufferSize), 0, Unmanaged.passUnretained(self).toOpaque(), readPacketCallback, nil, seekCallback) + + guard let avIoContext = avIoContextRef else { + self.readingError = true + return + } + + avFormatContext.pointee.pb = avIoContext + + guard avformat_open_input(&avFormatContextRef, nil, nil, nil) >= 0 else { + self.readingError = true + return + } + + guard avformat_find_stream_info(avFormatContext, nil) >= 0 else { + self.readingError = true + return + } + + var videoStream: StreamContext? + var audioStream: StreamContext? + + for streamIndex in FFMpegMediaFrameSourceContextHelpers.streamIndices(formatContext: avFormatContext, codecType: AVMEDIA_TYPE_VIDEO) { + if (avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.disposition & Int32(AV_DISPOSITION_ATTACHED_PIC)) == 0 { + + let codecPar = avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.codecpar! 
+ + if codecPar.pointee.codec_id == AV_CODEC_ID_H264 { + if let videoFormat = FFMpegMediaFrameSourceContextHelpers.createFormatDescriptionFromCodecData(UInt32(kCMVideoCodecType_H264), codecPar.pointee.width, codecPar.pointee.height, codecPar.pointee.extradata, codecPar.pointee.extradata_size, 0x43637661) { + let (fps, timebase) = FFMpegMediaFrameSourceContextHelpers.streamFpsAndTimeBase(stream: avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!, defaultTimeBase: CMTimeMake(1, 1000)) + + let duration = CMTimeMake(avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.duration, timebase.timescale) + + videoStream = StreamContext(index: streamIndex, codecContext: nil, fps: fps, timebase: timebase, duration: duration, decoder: FFMpegMediaPassthroughVideoFrameDecoder(videoFormat: videoFormat)) + break + } + } + } + } + + for streamIndex in FFMpegMediaFrameSourceContextHelpers.streamIndices(formatContext: avFormatContext, codecType: AVMEDIA_TYPE_AUDIO) { + if let codec = avcodec_find_decoder(avFormatContext.pointee.streams[streamIndex]!.pointee.codecpar.pointee.codec_id) { + if let codecContext = avcodec_alloc_context3(codec) { + if avcodec_parameters_to_context(codecContext, avFormatContext.pointee.streams[streamIndex]!.pointee.codecpar) >= 0 { + if avcodec_open2(codecContext, codec, nil) >= 0 { + let (fps, timebase) = FFMpegMediaFrameSourceContextHelpers.streamFpsAndTimeBase(stream: avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!, defaultTimeBase: CMTimeMake(1, 40000)) + + let duration = CMTimeMake(avFormatContext.pointee.streams.advanced(by: streamIndex).pointee!.pointee.duration, timebase.timescale) + + audioStream = StreamContext(index: streamIndex, codecContext: codecContext, fps: fps, timebase: timebase, duration: duration, decoder: FFMpegAudioFrameDecoder(codecContext: codecContext)) + } else { + var codecContextRef: UnsafeMutablePointer? 
= codecContext + avcodec_free_context(&codecContextRef) + } + } else { + var codecContextRef: UnsafeMutablePointer? = codecContext + avcodec_free_context(&codecContextRef) + } + } + } + } + + self.initializedState = InitializedState(avIoContext: avIoContext, avFormatContext: avFormatContext, audioStream: audioStream, videoStream: videoStream) + } + + private func readPacket() -> FFMpegPacket? { + if !self.packetQueue.isEmpty { + return self.packetQueue.remove(at: 0) + } else { + return self.readPacketInternal() + } + } + + private func readPacketInternal() -> FFMpegPacket? { + guard let initializedState = self.initializedState else { + return nil + } + + let packet = FFMpegPacket() + if av_read_frame(initializedState.avFormatContext, &packet.packet) < 0 { + return nil + } else { + return packet + } + } + + func takeFrames(until: Double) -> [MediaTrackDecodableFrame] { + if self.readingError { + return [] + } + + guard let initializedState = self.initializedState else { + return [] + } + + var videoTimestamp: Double? + if initializedState.videoStream == nil { + videoTimestamp = Double.infinity + } + + var audioTimestamp: Double? + if initializedState.audioStream == nil { + audioTimestamp = Double.infinity + } + + var frames: [MediaTrackDecodableFrame] = [] + + while !self.readingError && ((videoTimestamp == nil || videoTimestamp!.isLess(than: until)) || (audioTimestamp == nil || audioTimestamp!.isLess(than: until))) { + + if let packet = self.readPacket() { + if let videoStream = initializedState.videoStream, Int(packet.packet.stream_index) == videoStream.index { + let avNoPtsRawValue: UInt64 = 0x8000000000000000 + let avNoPtsValue = unsafeBitCast(avNoPtsRawValue, to: Int64.self) + let packetPts = packet.packet.pts == avNoPtsValue ? 
packet.packet.dts : packet.packet.pts + + let pts = CMTimeMake(packetPts, videoStream.timebase.timescale) + let dts = CMTimeMake(packet.packet.dts, videoStream.timebase.timescale) + + let duration: CMTime + + let frameDuration = packet.packet.duration + if frameDuration != 0 { + duration = CMTimeMake(frameDuration * videoStream.timebase.value, videoStream.timebase.timescale) + } else { + duration = videoStream.fps + } + + let frame = MediaTrackDecodableFrame(type: .video, packet: &packet.packet, pts: pts, dts: dts, duration: duration) + frames.append(frame) + + if videoTimestamp == nil || videoTimestamp! < CMTimeGetSeconds(pts) { + videoTimestamp = CMTimeGetSeconds(pts) + } + } else if let audioStream = initializedState.audioStream, Int(packet.packet.stream_index) == audioStream.index { + let avNoPtsRawValue: UInt64 = 0x8000000000000000 + let avNoPtsValue = unsafeBitCast(avNoPtsRawValue, to: Int64.self) + let packetPts = packet.packet.pts == avNoPtsValue ? packet.packet.dts : packet.packet.pts + + let pts = CMTimeMake(packetPts, audioStream.timebase.timescale) + let dts = CMTimeMake(packet.packet.dts, audioStream.timebase.timescale) + + let duration: CMTime + + let frameDuration = packet.packet.duration + if frameDuration != 0 { + duration = CMTimeMake(frameDuration * audioStream.timebase.value, audioStream.timebase.timescale) + } else { + duration = audioStream.fps + } + + let frame = MediaTrackDecodableFrame(type: .audio, packet: &packet.packet, pts: pts, dts: dts, duration: duration) + frames.append(frame) + + if audioTimestamp == nil || audioTimestamp! < CMTimeGetSeconds(pts) { + audioTimestamp = CMTimeGetSeconds(pts) + } + } + } else { + break + } + } + + return frames + } + + func contextInfo() -> FFMpegMediaFrameSourceContextInfo? { + if let initializedState = self.initializedState { + var audioStreamContext: FFMpegMediaFrameSourceStreamContextInfo? + var videoStreamContext: FFMpegMediaFrameSourceStreamContextInfo? 
+ + if let audioStream = initializedState.audioStream { + audioStreamContext = FFMpegMediaFrameSourceStreamContextInfo(duration: audioStream.duration, decoder: audioStream.decoder) + } + + if let videoStream = initializedState.videoStream { + videoStreamContext = FFMpegMediaFrameSourceStreamContextInfo(duration: videoStream.duration, decoder: videoStream.decoder) + } + + return FFMpegMediaFrameSourceContextInfo(audioStream: audioStreamContext, videoStream: videoStreamContext) + } + return nil + } + + func seek(timestamp: Double, completed: (FFMpegMediaFrameSourceDescriptionSet, CMTime) -> Void) { + if let initializedState = self.initializedState { + self.packetQueue.removeAll() + + for stream in [initializedState.videoStream, initializedState.audioStream] { + if let stream = stream { + let pts = CMTimeMakeWithSeconds(timestamp, stream.timebase.timescale) + av_seek_frame(initializedState.avFormatContext, Int32(stream.index), pts.value, AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_FRAME) + break + } + } + + var audioDescription: FFMpegMediaFrameSourceDescription? + var videoDescription: FFMpegMediaFrameSourceDescription? 
+ + if let audioStream = initializedState.audioStream { + audioDescription = FFMpegMediaFrameSourceDescription(duration: audioStream.duration, decoder: audioStream.decoder) + } + + if let videoStream = initializedState.videoStream { + videoDescription = FFMpegMediaFrameSourceDescription(duration: videoStream.duration, decoder: videoStream.decoder) + } + + let actualPts: CMTime + if let packet = self.readPacketInternal() { + self.packetQueue.append(packet) + if let videoStream = initializedState.videoStream, Int(packet.packet.stream_index) == videoStream.index { + actualPts = CMTimeMake(packet.pts, videoStream.timebase.timescale) + } else if let audioStream = initializedState.audioStream, Int(packet.packet.stream_index) == audioStream.index { + actualPts = CMTimeMake(packet.pts, audioStream.timebase.timescale) + } else { + actualPts = CMTimeMake(0, 1) + } + } else { + actualPts = CMTimeMake(0, 1) + } + + completed(FFMpegMediaFrameSourceDescriptionSet(audio: audioDescription, video: videoDescription), actualPts) + } + } +} diff --git a/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift b/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift new file mode 100644 index 0000000000..b58abd8cd3 --- /dev/null +++ b/TelegramUI/FFMpegMediaFrameSourceContextHelpers.swift @@ -0,0 +1,71 @@ +import Foundation +import CoreMedia +import TelegramUIPrivateModule + +final class FFMpegMediaFrameSourceContextHelpers { + static let registerFFMpegGlobals: Void = { + av_log_set_level(AV_LOG_DEBUG) + av_register_all() + return + }() + + static func createFormatDescriptionFromCodecData(_ formatId: UInt32, _ width: Int32, _ height: Int32, _ extradata: UnsafePointer, _ extradata_size: Int32, _ atom: UInt32) -> CMFormatDescription? 
{ + let par = NSMutableDictionary() + par.setObject(1 as NSNumber, forKey: "HorizontalSpacing" as NSString) + par.setObject(1 as NSNumber, forKey: "VerticalSpacing" as NSString) + + let atoms = NSMutableDictionary() + atoms.setObject(NSData(bytes: extradata, length: Int(extradata_size)), forKey: "avcC" as NSString) + + let extensions = NSMutableDictionary() + extensions.setObject("left" as NSString, forKey: "CVImageBufferChromaLocationBottomField" as NSString) + extensions.setObject("left" as NSString, forKey: "CVImageBufferChromaLocationTopField" as NSString) + extensions.setObject(0 as NSNumber, forKey: "FullRangeVideo" as NSString) + extensions.setObject(par, forKey: "CVPixelAspectRatio" as NSString) + extensions.setObject(atoms, forKey: "SampleDescriptionExtensionAtoms" as NSString) + extensions.setObject("avc1" as NSString, forKey: "FormatName" as NSString) + extensions.setObject(0 as NSNumber, forKey: "SpatialQuality" as NSString) + extensions.setObject(0 as NSNumber, forKey: "Version" as NSString) + extensions.setObject(0 as NSNumber, forKey: "FullRangeVideo" as NSString) + extensions.setObject(1 as NSNumber, forKey: "CVFieldCount" as NSString) + extensions.setObject(24 as NSNumber, forKey: "Depth" as NSString) + + var formatDescription: CMFormatDescription? 
+ CMVideoFormatDescriptionCreate(nil, CMVideoCodecType(formatId), width, height, extensions, &formatDescription) + + return formatDescription + } + + static func streamIndices(formatContext: UnsafeMutablePointer, codecType: AVMediaType) -> [Int] { + var indices: [Int] = [] + for i in 0 ..< Int(formatContext.pointee.nb_streams) { + if codecType == formatContext.pointee.streams.advanced(by: i).pointee!.pointee.codecpar!.pointee.codec_type { + indices.append(i) + } + } + return indices + } + + static func streamFpsAndTimeBase(stream: UnsafePointer, defaultTimeBase: CMTime) -> (fps: CMTime, timebase: CMTime) { + let timebase: CMTime + var fps: CMTime + + if stream.pointee.time_base.den != 0 && stream.pointee.time_base.num != 0 { + timebase = CMTimeMake(Int64(stream.pointee.time_base.num), stream.pointee.time_base.den) + } else if stream.pointee.codec.pointee.time_base.den != 0 && stream.pointee.codec.pointee.time_base.num != 0 { + timebase = CMTimeMake(Int64(stream.pointee.codec.pointee.time_base.num), stream.pointee.codec.pointee.time_base.den) + } else { + timebase = defaultTimeBase + } + + if stream.pointee.avg_frame_rate.den != 0 && stream.pointee.avg_frame_rate.num != 0 { + fps = CMTimeMake(Int64(stream.pointee.avg_frame_rate.num), stream.pointee.avg_frame_rate.den) + } else if stream.pointee.r_frame_rate.den != 0 && stream.pointee.r_frame_rate.num != 0 { + fps = CMTimeMake(Int64(stream.pointee.r_frame_rate.num), stream.pointee.r_frame_rate.den) + } else { + fps = CMTimeMake(1, 24) + } + + return (fps, timebase) + } +} diff --git a/TelegramUI/FFMpegMediaPassthroughVideoFrameDecoder.swift b/TelegramUI/FFMpegMediaPassthroughVideoFrameDecoder.swift new file mode 100644 index 0000000000..b7ee22a11d --- /dev/null +++ b/TelegramUI/FFMpegMediaPassthroughVideoFrameDecoder.swift @@ -0,0 +1,42 @@ +import CoreMedia + +final class FFMpegMediaPassthroughVideoFrameDecoder: MediaTrackFrameDecoder { + private let videoFormat: CMVideoFormatDescription + private var 
resetDecoderOnNextFrame = true + + init(videoFormat: CMVideoFormatDescription) { + self.videoFormat = videoFormat + } + + func decode(frame: MediaTrackDecodableFrame) -> MediaTrackFrame? { + var blockBuffer: CMBlockBuffer? + + let bytes = malloc(Int(frame.packet.pointee.size))! + memcpy(bytes, frame.packet.pointee.data, Int(frame.packet.pointee.size)) + guard CMBlockBufferCreateWithMemoryBlock(nil, bytes, Int(frame.packet.pointee.size), nil, nil, 0, Int(frame.packet.pointee.size), 0, &blockBuffer) == noErr else { + free(bytes) + return nil + } + + var timingInfo = CMSampleTimingInfo(duration: frame.duration, presentationTimeStamp: frame.pts, decodeTimeStamp: frame.dts) + var sampleBuffer: CMSampleBuffer? + var sampleSize = Int(frame.packet.pointee.size) + guard CMSampleBufferCreate(nil, blockBuffer, true, nil, nil, self.videoFormat, 1, 1, &timingInfo, 1, &sampleSize, &sampleBuffer) == noErr else { + return nil + } + + let resetDecoder = self.resetDecoderOnNextFrame + if self.resetDecoderOnNextFrame { + self.resetDecoderOnNextFrame = false + let attachments = CMSampleBufferGetSampleAttachmentsArray(sampleBuffer!, true)! as NSArray + let dict = attachments[0] as! NSMutableDictionary + dict.setValue(kCFBooleanTrue as AnyObject, forKey: kCMSampleBufferAttachmentKey_ResetDecoderBeforeDecoding as NSString as String) + } + + return MediaTrackFrame(type: .video, sampleBuffer: sampleBuffer!, resetDecoder: resetDecoder) + } + + func reset() { + self.resetDecoderOnNextFrame = true + } +} diff --git a/TelegramUI/FFMpegMediaVideoFrameDecoder.swift b/TelegramUI/FFMpegMediaVideoFrameDecoder.swift new file mode 100644 index 0000000000..ccaa213705 --- /dev/null +++ b/TelegramUI/FFMpegMediaVideoFrameDecoder.swift @@ -0,0 +1,10 @@ + +final class FFMpegMediaVideoFrameDecoder: MediaTrackFrameDecoder { + func decode(frame: MediaTrackDecodableFrame) -> MediaTrackFrame? 
{ + return nil + } + + func reset() { + + } +} diff --git a/TelegramUI/FFMpegPacket.swift b/TelegramUI/FFMpegPacket.swift new file mode 100644 index 0000000000..a632d12a93 --- /dev/null +++ b/TelegramUI/FFMpegPacket.swift @@ -0,0 +1,18 @@ +import Foundation +import TelegramUIPrivateModule + +final class FFMpegPacket { + var packet = AVPacket() + + deinit { + av_packet_unref(&self.packet) + } + + var pts: Int64 { + let avNoPtsRawValue: UInt64 = 0x8000000000000000 + let avNoPtsValue = unsafeBitCast(avNoPtsRawValue, to: Int64.self) + let packetPts = self.packet.pts == avNoPtsValue ? self.packet.dts : self.packet.pts + + return packetPts + } +} diff --git a/TelegramUI/FFMpegSwResample.h b/TelegramUI/FFMpegSwResample.h new file mode 100644 index 0000000000..a1cb9fab98 --- /dev/null +++ b/TelegramUI/FFMpegSwResample.h @@ -0,0 +1,12 @@ +#import + +#import "../third-party/FFMpeg-iOS/include/libavutil/avutil.h" +#import "../third-party/FFMpeg-iOS/include/libavutil/channel_layout.h" +#import "../third-party/FFMpeg-iOS/include/libswresample/swresample.h" + +@interface FFMpegSwResample : NSObject + +- (instancetype)initWithSourceChannelCount:(NSInteger)sourceChannelCount sourceSampleRate:(NSInteger)sourceSampleRate sourceSampleFormat:(enum AVSampleFormat)sourceSampleFormat destinationChannelCount:(NSInteger)destinationChannelCount destinationSampleRate:(NSInteger)destinationSampleRate destinationSampleFormat:(enum AVSampleFormat)destinationSampleFormat; +- (NSData *)resample:(AVFrame *)frame; + +@end diff --git a/TelegramUI/FFMpegSwResample.m b/TelegramUI/FFMpegSwResample.m new file mode 100644 index 0000000000..4a819198ea --- /dev/null +++ b/TelegramUI/FFMpegSwResample.m @@ -0,0 +1,69 @@ +#import "FFMpegSwResample.h" + +@interface FFMpegSwResample () { + SwrContext *_context; + NSUInteger _ratio; + NSInteger _destinationChannelCount; + enum AVSampleFormat _destinationSampleFormat; + void *_buffer; + int _bufferSize; +} + +@end + +@implementation FFMpegSwResample + +- 
(instancetype)initWithSourceChannelCount:(NSInteger)sourceChannelCount sourceSampleRate:(NSInteger)sourceSampleRate sourceSampleFormat:(enum AVSampleFormat)sourceSampleFormat destinationChannelCount:(NSInteger)destinationChannelCount destinationSampleRate:(NSInteger)destinationSampleRate destinationSampleFormat:(enum AVSampleFormat)destinationSampleFormat { + self = [super init]; + if (self != nil) { + _destinationChannelCount = destinationChannelCount; + _destinationSampleFormat = destinationSampleFormat; + _context = swr_alloc_set_opts(NULL, + av_get_default_channel_layout((int)destinationChannelCount), + destinationSampleFormat, + (int)destinationSampleRate, + av_get_default_channel_layout((int)sourceChannelCount), + sourceSampleFormat, + (int)sourceSampleRate, + 0, + NULL); + _ratio = MAX(1, destinationSampleRate / sourceSampleRate) * MAX(1, destinationChannelCount / sourceChannelCount) * 2; + swr_init(_context); + } + return self; +} + +- (void)dealloc { + swr_free(&_context); + if (_buffer) { + free(_buffer); + } +} + +- (NSData *)resample:(AVFrame *)frame { + int bufSize = av_samples_get_buffer_size(NULL, + (int)_destinationChannelCount, + frame->nb_samples * (int)_ratio, + _destinationSampleFormat, + 1); + + if (!_buffer || _bufferSize < bufSize) { + _bufferSize = bufSize; + _buffer = realloc(_buffer, _bufferSize); + } + + Byte *outbuf[2] = { _buffer, 0 }; + + int numFrames = swr_convert(_context, + outbuf, + frame->nb_samples * (int)_ratio, + (const uint8_t **)frame->data, + frame->nb_samples); + if (numFrames <= 0) { + return nil; + } + + return [[NSData alloc] initWithBytes:_buffer length:numFrames * _destinationChannelCount * 2]; +} + +@end diff --git a/TelegramUI/FastBlur.h b/TelegramUI/FastBlur.h new file mode 100644 index 0000000000..09e9270644 --- /dev/null +++ b/TelegramUI/FastBlur.h @@ -0,0 +1,9 @@ +#ifndef Telegram_FastBlur_h +#define Telegram_FastBlur_h + +#import + +void telegramFastBlur(int imageWidth, int imageHeight, int imageStride, void 
*pixels); +void telegramDspBlur(int imageWidth, int imageHeight, int imageStride, void *pixels); + +#endif diff --git a/TelegramUI/FastBlur.m b/TelegramUI/FastBlur.m new file mode 100644 index 0000000000..a79943b134 --- /dev/null +++ b/TelegramUI/FastBlur.m @@ -0,0 +1,164 @@ +#import "FastBlur.h" + +#import + +static inline uint64_t get_colors (const uint8_t *p) { + return p[0] + (p[1] << 16) + ((uint64_t)p[2] << 32); +} + +void telegramFastBlur(int imageWidth, int imageHeight, int imageStride, void *pixels) +{ + uint8_t *pix = (uint8_t *)pixels; + const int w = imageWidth; + const int h = imageHeight; + const int stride = imageStride; + const int radius = 3; + const int r1 = radius + 1; + const int div = radius * 2 + 1; + + if (radius > 15 || div >= w || div >= h) + { + return; + } + + uint64_t *rgb = malloc(imageStride * imageHeight * sizeof(uint64_t)); + + int x, y, i; + + int yw = 0; + const int we = w - r1; + for (y = 0; y < h; y++) { + uint64_t cur = get_colors (&pix[yw]); + uint64_t rgballsum = -radius * cur; + uint64_t rgbsum = cur * ((r1 * (r1 + 1)) >> 1); + + for (i = 1; i <= radius; i++) { + uint64_t cur = get_colors (&pix[yw + i * 4]); + rgbsum += cur * (r1 - i); + rgballsum += cur; + } + + x = 0; + +#define update(start, middle, end) \ +rgb[y * w + x] = (rgbsum >> 4) & 0x00FF00FF00FF00FF; \ +\ +rgballsum += get_colors (&pix[yw + (start) * 4]) - \ +2 * get_colors (&pix[yw + (middle) * 4]) + \ +get_colors (&pix[yw + (end) * 4]); \ +rgbsum += rgballsum; \ +x++; \ + + while (x < r1) { + update (0, x, x + r1); + } + while (x < we) { + update (x - r1, x, x + r1); + } + while (x < w) { + update (x - r1, x, w - 1); + } +#undef update + + yw += stride; + } + + const int he = h - r1; + for (x = 0; x < w; x++) { + uint64_t rgballsum = -radius * rgb[x]; + uint64_t rgbsum = rgb[x] * ((r1 * (r1 + 1)) >> 1); + for (i = 1; i <= radius; i++) { + rgbsum += rgb[i * w + x] * (r1 - i); + rgballsum += rgb[i * w + x]; + } + + y = 0; + int yi = x * 4; + +#define update(start, 
middle, end) \ +int64_t res = rgbsum >> 4; \ +pix[yi] = (uint8_t)res; \ +pix[yi + 1] = (uint8_t)(res >> 16); \ +pix[yi + 2] = (uint8_t)(res >> 32); \ +\ +rgballsum += rgb[x + (start) * w] - \ +2 * rgb[x + (middle) * w] + \ +rgb[x + (end) * w]; \ +rgbsum += rgballsum; \ +y++; \ +yi += stride; + + while (y < r1) { + update (0, y, y + r1); + } + while (y < he) { + update (y - r1, y, y + r1); + } + while (y < h) { + update (y - r1, y, h - 1); + } +#undef update + } + + free(rgb); +} + +void telegramDspBlur(int imageWidth, int imageHeight, int imageStride, void *pixels) { + uint8_t *srcData = pixels; + int bytesPerRow = imageStride; + int width = imageWidth; + int height = imageHeight; + bool shouldClip = false; + static const float matrix[] = { 1/9.0f, 1/9.0f, 1/9.0f, 1/9.0f, 1/9.0f, 1/9.0f, 1/9.0f, 1/9.0f, 1/9.0f }; + +//void telegramDspBlur(uint8_t *srcData, int bytesPerRow, int width, int height, float *matrix, int matrixRows, int matrixCols, bool shouldClip) { + unsigned char *finalData = malloc(bytesPerRow * height * sizeof(unsigned char)); + if (srcData != NULL && finalData != NULL) + { + size_t dataSize = bytesPerRow * height; + // copy src to destination: technically this is a bit wasteful as we'll overwrite + // all but the "alpha" portion of finalData during processing but I'm unaware of + // a memcpy with stride function + memcpy(finalData, srcData, dataSize); + // alloc space for our dsp arrays + float *srcAsFloat = malloc(width*height*sizeof(float)); + float *resultAsFloat = malloc(width*height*sizeof(float)); + // loop through each colour (color) chanel (skip the first chanel, it's alpha and is left alone) + for (int i=1; i<4; i++) { + // convert src pixels into float data type + vDSP_vfltu8(srcData+i,4,srcAsFloat,1,width * height); + // apply matrix using dsp + /*switch (matrixSize) { + case DSPMatrixSize3x3:*/ + vDSP_f3x3(srcAsFloat, height, width, matrix, resultAsFloat); + /*break; + case DSPMatrixSize5x5: + vDSP_f5x5(srcAsFloat, height, width, matrix, 
resultAsFloat); + break; + case DSPMatrixSizeCustom: + NSAssert(matrixCols > 0 && matrixRows > 0, + @"invalid usage: please use full method definition and pass rows/cols for matrix"); + vDSP_imgfir(srcAsFloat, height, width, matrix, resultAsFloat, matrixRows, matrixCols); + break; + default: + break; + }*/ + // certain operations may result in values to large or too small in our output float array + // so if necessary we clip the results here. This param is optional so that we don't need to take + // the speed hit on blur operations or others which can't result in invalid float values. + if (shouldClip) { + float min = 0; + float max = 255; + vDSP_vclip(resultAsFloat, 1, &min, &max, resultAsFloat, 1, width * height); + } + // convert back into bytes and copy into finalData + vDSP_vfixu8(resultAsFloat, 1, finalData+i, 4, width * height); + } + // clean up dsp space + free(srcAsFloat); + free(resultAsFloat); + memcpy(srcData, finalData, bytesPerRow * height * sizeof(unsigned char)); + free(finalData); + } +} + diff --git a/TelegramUI/FileResources.swift b/TelegramUI/FileResources.swift new file mode 100644 index 0000000000..f61135c075 --- /dev/null +++ b/TelegramUI/FileResources.swift @@ -0,0 +1,16 @@ +import Foundation +import Postbox +import SwiftSignalKit +import TelegramCore + +func fileResource(_ file: TelegramMediaFile) -> CloudFileMediaResource { + return CloudFileMediaResource(location: file.location, size: file.size) +} + +func fileInteractiveFetched(account: Account, file: TelegramMediaFile) -> Signal { + return account.postbox.mediaBox.fetchedResource(fileResource(file)) +} + +func fileCancelInteractiveFetch(account: Account, file: TelegramMediaFile) { + account.postbox.mediaBox.cancelInteractiveResourceFetch(fileResource(file)) +} diff --git a/TelegramUI/FrameworkBundle.swift b/TelegramUI/FrameworkBundle.swift new file mode 100644 index 0000000000..b863239d79 --- /dev/null +++ b/TelegramUI/FrameworkBundle.swift @@ -0,0 +1,13 @@ +import Foundation + 
+private class FrameworkBundleClass: NSObject { +} + +private let frameworkBundle: Bundle = Bundle(for: FrameworkBundleClass.self) +private let screenScaleFactor = Int(UIScreen.main.scale) + +extension UIImage { + convenience init?(bundleImageName: String) { + self.init(named: bundleImageName, in: frameworkBundle, compatibleWith: nil) + } +} diff --git a/TelegramUI/GalleryController.swift b/TelegramUI/GalleryController.swift new file mode 100644 index 0000000000..1b645bfc35 --- /dev/null +++ b/TelegramUI/GalleryController.swift @@ -0,0 +1,310 @@ +import Foundation +import Display +import QuickLook +import Postbox +import SwiftSignalKit +import AsyncDisplayKit +import TelegramCore + +private func tagsForMessage(_ message: Message) -> MessageTags? { + for media in message.media { + switch media { + case _ as TelegramMediaImage: + return .PhotoOrVideo + case let file as TelegramMediaFile: + if file.isVideo { + return .PhotoOrVideo + } else if file.isVoice { + return .Voice + } else if file.isSticker { + return nil + } else { + return .File + } + default: + break + } + } + return nil +} + +private func mediaForMessage(message: Message) -> Media? { + for media in message.media { + if let media = media as? TelegramMediaImage { + return media + } else if let file = media as? TelegramMediaFile { + if file.mimeType.hasPrefix("audio/") { + return nil + } else if !file.isVideo && file.mimeType.hasPrefix("video/") { + return file + } else { + return file + } + } + } + return nil +} + +private func itemForEntry(account: Account, entry: MessageHistoryEntry) -> GalleryItem { + switch entry { + case let .MessageEntry(message, location): + if let media = mediaForMessage(message: message) { + if let _ = media as? TelegramMediaImage { + return ChatImageGalleryItem(account: account, message: message, location: location) + } else if let file = media as? 
TelegramMediaFile { + if file.isVideo || file.mimeType.hasPrefix("video/") { + return ChatVideoGalleryItem(account: account, message: message, location: location) + } else { + if file.mimeType.hasPrefix("image/") { + return ChatImageGalleryItem(account: account, message: message, location: location) + } else { + return ChatDocumentGalleryItem(account: account, message: message, location: location) + } + } + } + } + default: + break + } + return ChatHoleGalleryItem() +} + +class GalleryControllerPresentationArguments { + let transitionNode: (MessageId, Media) -> ASDisplayNode? + + init(transitionNode: @escaping (MessageId, Media) -> ASDisplayNode?) { + self.transitionNode = transitionNode + } +} + +class GalleryController: ViewController { + private var galleryNode: GalleryControllerNode { + return self.displayNode as! GalleryControllerNode + } + + private let account: Account + + private let _ready = Promise() + override var ready: Promise { + return self._ready + } + private var didSetReady = false + + private let disposable = MetaDisposable() + + private var entries: [MessageHistoryEntry] = [] + private var centralEntryIndex: Int? 
+ + private let centralItemTitle = Promise() + private let centralItemTitleView = Promise() + private let centralItemNavigationStyle = Promise() + private let centralItemAttributesDisposable = DisposableSet() + + private let _hiddenMedia = Promise<(MessageId, Media)?>(nil) + var hiddenMedia: Signal<(MessageId, Media)?, NoError> { + return self._hiddenMedia.get() + } + + init(account: Account, messageId: MessageId) { + self.account = account + + super.init() + + self.navigationBar.backgroundColor = UIColor(white: 0.0, alpha: 0.5) + self.navigationBar.stripeColor = UIColor.clear + self.navigationBar.foregroundColor = UIColor.white + self.navigationBar.accentColor = UIColor.white + + self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Done", style: .plain, target: self, action: #selector(self.donePressed)) + + self.statusBar.style = .White + + let message = account.postbox.messageAtId(messageId) + + let messageView = message + |> filter({ $0 != nil }) + |> mapToSignal { message -> Signal in + if let tags = tagsForMessage(message!) 
{ + let view = account.postbox.aroundMessageHistoryViewForPeerId(messageId.peerId, index: MessageIndex(message!), count: 50, anchorIndex: MessageIndex(message!), fixedCombinedReadState: nil, tagMask: tags) + + return view + |> mapToSignal { (view, _) -> Signal in + return .single(view) + } + } else { + return .single(nil) + } + } + |> take(1) + |> deliverOnMainQueue + + self.disposable.set(messageView.start(next: { [weak self] view in + if let strongSelf = self { + if let view = view { + strongSelf.entries = view.entries + loop: for i in 0 ..< strongSelf.entries.count { + switch strongSelf.entries[i] { + case let .MessageEntry(message, _) where message.id == messageId: + strongSelf.centralEntryIndex = i + break loop + default: + break + } + } + if strongSelf.isViewLoaded { + strongSelf.galleryNode.pager.replaceItems(strongSelf.entries.map({ itemForEntry(account: account, entry: $0) }), centralItemIndex: strongSelf.centralEntryIndex) + + let ready = strongSelf.galleryNode.pager.ready() |> timeout(2.0, queue: Queue.mainQueue(), alternate: .single(Void())) |> afterNext { [weak strongSelf] _ in + strongSelf?.didSetReady = true + } + strongSelf._ready.set(ready |> map { true }) + } + } + } + })) + + self.centralItemAttributesDisposable.add(self.centralItemTitle.get().start(next: { [weak self] title in + self?.navigationItem.title = title + })) + + self.centralItemAttributesDisposable.add(self.centralItemTitleView.get().start(next: { [weak self] titleView in + self?.navigationItem.titleView = titleView + })) + + self.centralItemAttributesDisposable.add(self.centralItemNavigationStyle.get().start(next: { [weak self] style in + if let strongSelf = self { + switch style { + case .dark: + strongSelf.statusBar.style = .White + strongSelf.navigationBar.backgroundColor = UIColor(white: 0.0, alpha: 0.5) + strongSelf.navigationBar.stripeColor = UIColor.clear + strongSelf.navigationBar.foregroundColor = UIColor.white + strongSelf.navigationBar.accentColor = UIColor.white + 
strongSelf.galleryNode.backgroundColor = UIColor.black + case .light: + strongSelf.statusBar.style = .Black + strongSelf.navigationBar.backgroundColor = UIColor(red: 0.968626451, green: 0.968626451, blue: 0.968626451, alpha: 1.0) + strongSelf.navigationBar.foregroundColor = UIColor.black + strongSelf.navigationBar.accentColor = UIColor(0x1195f2) + strongSelf.navigationBar.stripeColor = UIColor(red: 0.6953125, green: 0.6953125, blue: 0.6953125, alpha: 1.0) + strongSelf.galleryNode.backgroundColor = UIColor(0xbdbdc2) + } + } + })) + } + + required init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + self.disposable.dispose() + self.centralItemAttributesDisposable.dispose() + } + + @objc func donePressed() { + var animatedOutNode = true + var animatedOutInterface = false + + let completion = { [weak self] in + if animatedOutNode && animatedOutInterface { + self?._hiddenMedia.set(.single(nil)) + self?.presentingViewController?.dismiss(animated: false, completion: nil) + } + } + + if let centralItemNode = self.galleryNode.pager.centralItemNode(), let presentationArguments = self.presentationArguments as? 
GalleryControllerPresentationArguments { + if case let .MessageEntry(message, _) = self.entries[centralItemNode.index] { + if let media = mediaForMessage(message: message), let node = presentationArguments.transitionNode(message.id, media) { + animatedOutNode = false + centralItemNode.animateOut(to: node, completion: { + animatedOutNode = true + completion() + }) + } + } + } + + self.galleryNode.animateOut(animateContent: animatedOutNode, completion: { + animatedOutInterface = true + completion() + }) + } + + override func loadDisplayNode() { + self.displayNode = GalleryControllerNode() + self.displayNodeDidLoad() + + self.galleryNode.statusBar = self.statusBar + self.galleryNode.navigationBar = self.navigationBar + + self.galleryNode.transitionNodeForCentralItem = { [weak self] in + if let strongSelf = self { + if let centralItemNode = strongSelf.galleryNode.pager.centralItemNode(), let presentationArguments = strongSelf.presentationArguments as? GalleryControllerPresentationArguments { + if case let .MessageEntry(message, _) = strongSelf.entries[centralItemNode.index] { + if let media = mediaForMessage(message: message), let node = presentationArguments.transitionNode(message.id, media) { + return node + } + } + } + } + return nil + } + self.galleryNode.dismiss = { [weak self] in + self?._hiddenMedia.set(.single(nil)) + self?.presentingViewController?.dismiss(animated: false, completion: nil) + } + + self.galleryNode.pager.replaceItems(self.entries.map({ itemForEntry(account: self.account, entry: $0) }), centralItemIndex: self.centralEntryIndex) + + self.galleryNode.pager.centralItemIndexUpdated = { [weak self] index in + if let strongSelf = self { + var hiddenItem: (MessageId, Media)? 
+ if let index = index { + if case let .MessageEntry(message, _) = strongSelf.entries[index], let media = mediaForMessage(message: message) { + hiddenItem = (message.id, media) + } + + if let node = strongSelf.galleryNode.pager.centralItemNode() { + strongSelf.centralItemTitle.set(node.title()) + strongSelf.centralItemTitleView.set(node.titleView()) + strongSelf.centralItemNavigationStyle.set(node.navigationStyle()) + } + } + if strongSelf.didSetReady { + strongSelf._hiddenMedia.set(.single(hiddenItem)) + } + } + } + } + + override func viewDidAppear(_ animated: Bool) { + super.viewDidAppear(animated) + + var nodeAnimatesItself = false + + if let centralItemNode = self.galleryNode.pager.centralItemNode(), let presentationArguments = self.presentationArguments as? GalleryControllerPresentationArguments { + if case let .MessageEntry(message, _) = self.entries[centralItemNode.index] { + self.centralItemTitle.set(centralItemNode.title()) + self.centralItemTitleView.set(centralItemNode.titleView()) + self.centralItemNavigationStyle.set(centralItemNode.navigationStyle()) + + if let media = mediaForMessage(message: message), let node = presentationArguments.transitionNode(message.id, media) { + nodeAnimatesItself = true + centralItemNode.animateIn(from: node) + + self._hiddenMedia.set(.single((message.id, media))) + } + } + } + + self.galleryNode.animateIn(animateContent: !nodeAnimatesItself) + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.galleryNode.frame = CGRect(origin: CGPoint(), size: layout.size) + self.galleryNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } +} diff --git a/TelegramUI/GalleryControllerNode.swift b/TelegramUI/GalleryControllerNode.swift new file mode 100644 index 0000000000..0fc0106529 --- /dev/null +++ 
b/TelegramUI/GalleryControllerNode.swift @@ -0,0 +1,166 @@ +import Foundation +import AsyncDisplayKit +import Display + +class GalleryControllerNode: ASDisplayNode, UIScrollViewDelegate { + var statusBar: StatusBar? + var navigationBar: NavigationBar? + var transitionNodeForCentralItem: (() -> ASDisplayNode?)? + var dismiss: (() -> Void)? + + var containerLayout: ContainerViewLayout? + var scrollView: UIScrollView + var pager: GalleryPagerNode + + var areControlsHidden = false + + override init() { + self.scrollView = UIScrollView() + self.pager = GalleryPagerNode() + + super.init(viewBlock: { + return UITracingLayerView() + }, didLoad: nil) + + self.pager.toggleControlsVisibility = { [weak self] in + if let strongSelf = self { + strongSelf.areControlsHidden = !strongSelf.areControlsHidden + UIView.animate(withDuration: 0.3, animations: { + let alpha: CGFloat = strongSelf.areControlsHidden ? 0.0 : 1.0 + strongSelf.navigationBar?.alpha = alpha + strongSelf.statusBar?.alpha = alpha + }) + } + } + + self.scrollView.showsVerticalScrollIndicator = false + self.scrollView.showsHorizontalScrollIndicator = false + self.scrollView.alwaysBounceHorizontal = false + self.scrollView.alwaysBounceVertical = false + self.scrollView.clipsToBounds = false + self.scrollView.delegate = self + self.scrollView.scrollsToTop = false + self.view.addSubview(self.scrollView) + + self.scrollView.addSubview(self.pager.view) + + self.backgroundColor = UIColor.black + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.containerLayout = layout + + let previousContentHeight = self.scrollView.contentSize.height + let previousVerticalOffset = self.scrollView.contentOffset.y + + self.scrollView.frame = CGRect(origin: CGPoint(), size: layout.size) + self.scrollView.contentSize = CGSize(width: 0.0, height: layout.size.height * 3.0) + + if previousContentHeight.isEqual(to: 0.0) { + 
self.scrollView.contentOffset = CGPoint(x: 0.0, y: self.scrollView.contentSize.height / 3.0) + } else { + self.scrollView.contentOffset = CGPoint(x: 0.0, y: previousVerticalOffset * self.scrollView.contentSize.height / previousContentHeight) + } + + self.pager.frame = CGRect(origin: CGPoint(x: 0.0, y: layout.size.height), size: layout.size) + self.pager.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + } + + func animateIn(animateContent: Bool) { + self.backgroundColor = self.backgroundColor?.withAlphaComponent(0.0) + self.statusBar?.alpha = 0.0 + self.navigationBar?.alpha = 0.0 + UIView.animate(withDuration: 0.2, animations: { + self.backgroundColor = self.backgroundColor?.withAlphaComponent(1.0) + self.statusBar?.alpha = 1.0 + self.navigationBar?.alpha = 1.0 + }) + + if animateContent { + self.scrollView.layer.animateBounds(from: self.scrollView.layer.bounds.offsetBy(dx: 0.0, dy: -self.scrollView.layer.bounds.size.height), to: self.scrollView.layer.bounds, duration: 0.4, timingFunction: kCAMediaTimingFunctionSpring) + } + } + + func animateOut(animateContent: Bool, completion: @escaping () -> Void) { + var contentAnimationCompleted = true + var interfaceAnimationCompleted = false + + let intermediateCompletion = { + if contentAnimationCompleted && interfaceAnimationCompleted { + completion() + } + } + + UIView.animate(withDuration: 0.25, animations: { + self.backgroundColor = self.backgroundColor?.withAlphaComponent(0.0) + self.statusBar?.alpha = 0.0 + self.navigationBar?.alpha = 0.0 + }, completion: { _ in + interfaceAnimationCompleted = true + intermediateCompletion() + }) + + if animateContent { + contentAnimationCompleted = false + self.scrollView.layer.animateBounds(from: self.scrollView.layer.bounds, to: self.scrollView.layer.bounds.offsetBy(dx: 0.0, dy: -self.scrollView.layer.bounds.size.height), duration: 0.25, timingFunction: kCAMediaTimingFunctionLinear, removeOnCompletion: false, completion: { _ in + 
contentAnimationCompleted = true + intermediateCompletion() + }) + } + } + + func scrollViewDidScroll(_ scrollView: UIScrollView) { + let distanceFromEquilibrium = scrollView.contentOffset.y - scrollView.contentSize.height / 3.0 + + let transition = 1.0 - min(1.0, max(0.0, abs(distanceFromEquilibrium) / 50.0)) + let backgroundTransition = 1.0 - min(1.0, max(0.0, abs(distanceFromEquilibrium) / 80.0)) + self.backgroundColor = self.backgroundColor?.withAlphaComponent(backgroundTransition) + + if !self.areControlsHidden { + self.statusBar?.alpha = transition + self.navigationBar?.alpha = transition + } + } + + func scrollViewWillEndDragging(_ scrollView: UIScrollView, withVelocity velocity: CGPoint, targetContentOffset: UnsafeMutablePointer) { + targetContentOffset.pointee = scrollView.contentOffset + + if abs(velocity.y) > 1.0 { + self.layer.animate(from: self.layer.backgroundColor!, to: UIColor(white: 0.0, alpha: 0.0).cgColor, keyPath: "backgroundColor", timingFunction: kCAMediaTimingFunctionLinear, duration: 0.2, removeOnCompletion: false) + + var interfaceAnimationCompleted = false + var contentAnimationCompleted = true + + let completion = { [weak self] in + if interfaceAnimationCompleted && contentAnimationCompleted { + if let dismiss = self?.dismiss { + dismiss() + } + } + } + + if let centralItemNode = self.pager.centralItemNode(), let transitionNodeForCentralItem = self.transitionNodeForCentralItem, let node = transitionNodeForCentralItem() { + contentAnimationCompleted = false + centralItemNode.animateOut(to: node, completion: { + contentAnimationCompleted = true + completion() + }) + } + + self.animateOut(animateContent: false, completion: { + interfaceAnimationCompleted = true + completion() + }) + + if contentAnimationCompleted { + contentAnimationCompleted = false + self.scrollView.layer.animateBounds(from: self.scrollView.layer.bounds, to: self.scrollView.layer.bounds.offsetBy(dx: 0.0, dy: self.scrollView.layer.bounds.size.height * (velocity.y < 0.0 ? 
-1.0 : 1.0)), duration: 0.2, timingFunction: kCAMediaTimingFunctionLinear, removeOnCompletion: false, completion: { _ in + contentAnimationCompleted = true + completion() + }) + } + } else { + self.scrollView.setContentOffset(CGPoint(x: 0.0, y: self.scrollView.contentSize.height / 3.0), animated: true) + } + } +} diff --git a/TelegramUI/GalleryItem.swift b/TelegramUI/GalleryItem.swift new file mode 100644 index 0000000000..7a6a3f4d64 --- /dev/null +++ b/TelegramUI/GalleryItem.swift @@ -0,0 +1,8 @@ +import Foundation + + + +protocol GalleryItem { + func node() -> GalleryItemNode + func updateNode(node: GalleryItemNode) +} diff --git a/TelegramUI/GalleryItemNode.swift b/TelegramUI/GalleryItemNode.swift new file mode 100644 index 0000000000..5583760b70 --- /dev/null +++ b/TelegramUI/GalleryItemNode.swift @@ -0,0 +1,59 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit + +public enum GalleryItemNodeNavigationStyle { + case light + case dark +} + +open class GalleryItemNode: ASDisplayNode { + private var _index: Int? + var index: Int { + get { + return self._index! 
+ } set(value) { + self._index = value + } + } + + var toggleControlsVisibility: () -> Void = { } + + override init() { + super.init(viewBlock: { + return UITracingLayerView() + }, didLoad: nil) + } + + open func ready() -> Signal { + return .single(Void()) + } + + open func title() -> Signal { + return .single("") + } + + open func titleView() -> Signal { + return .single(nil) + } + + open func navigationStyle() -> Signal { + return .single(.dark) + } + + open func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + } + + open func centralityUpdated(isCentral: Bool) { + } + + open func visibilityUpdated(isVisible: Bool) { + } + + open func animateIn(from node: ASDisplayNode) { + } + + open func animateOut(to node: ASDisplayNode, completion: @escaping () -> Void) { + } +} diff --git a/TelegramUI/GalleryPagerNode.swift b/TelegramUI/GalleryPagerNode.swift new file mode 100644 index 0000000000..c16c4913a6 --- /dev/null +++ b/TelegramUI/GalleryPagerNode.swift @@ -0,0 +1,253 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit + +final class GalleryPagerNode: ASDisplayNode, UIScrollViewDelegate { + private let pageGap: CGFloat = 20.0 + + private let scrollView: UIScrollView + + private var items: [GalleryItem] = [] + private var itemNodes: [GalleryItemNode] = [] + private var centralItemIndex: Int? { + didSet { + if oldValue != self.centralItemIndex { + self.centralItemIndexUpdated(self.centralItemIndex) + } + } + } + + private var containerLayout: (ContainerViewLayout, CGFloat)? + + var centralItemIndexUpdated: (Int?) 
-> Void = { _ in } + var toggleControlsVisibility: () -> Void = { } + + override init() { + self.scrollView = UIScrollView() + + super.init() + + self.scrollView.showsVerticalScrollIndicator = false + self.scrollView.showsHorizontalScrollIndicator = false + self.scrollView.alwaysBounceHorizontal = true + self.scrollView.isPagingEnabled = true + self.scrollView.delegate = self + self.scrollView.clipsToBounds = false + self.scrollView.scrollsToTop = false + self.view.addSubview(self.scrollView) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.containerLayout = (layout, navigationBarHeight) + + var previousCentralNodeHorizontalOffset: CGFloat? + if let centralItemIndex = self.centralItemIndex, let centralNode = self.visibleItemNode(at: centralItemIndex) { + previousCentralNodeHorizontalOffset = self.scrollView.contentOffset.x - centralNode.frame.minX + } + + self.scrollView.frame = CGRect(origin: CGPoint(x: -self.pageGap, y: 0.0), size: CGSize(width: layout.size.width + self.pageGap * 2.0, height: layout.size.height)) + + for i in 0 ..< self.itemNodes.count { + self.itemNodes[i].frame = CGRect(origin: CGPoint(x: CGFloat(i) * self.scrollView.bounds.size.width + self.pageGap, y: 0.0), size: CGSize(width: self.scrollView.bounds.size.width - self.pageGap * 2.0, height: self.scrollView.bounds.size.height)) + self.itemNodes[i].containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + } + + if let previousCentralNodeHorizontalOffset = previousCentralNodeHorizontalOffset, let centralItemIndex = self.centralItemIndex, let centralNode = self.visibleItemNode(at: centralItemIndex) { + self.scrollView.contentOffset = CGPoint(x: centralNode.frame.minX + previousCentralNodeHorizontalOffset, y: 0.0) + } + + self.updateItemNodes() + } + + func ready() -> Signal { + if let itemNode = self.centralItemNode() { + return itemNode.ready() + } + return 
.single(Void()) + } + + func centralItemNode() -> GalleryItemNode? { + if let centralItemIndex = self.centralItemIndex, let centralItemNode = self.visibleItemNode(at: centralItemIndex) { + return centralItemNode + } else { + return nil + } + } + + func replaceItems(_ items: [GalleryItem], centralItemIndex: Int?) { + for itemNode in self.itemNodes { + itemNode.removeFromSupernode() + } + self.itemNodes.removeAll() + if let centralItemIndex = centralItemIndex, centralItemIndex >= 0 && centralItemIndex < items.count { + self.centralItemIndex = centralItemIndex + } else { + self.centralItemIndex = nil + } + self.items = items + + self.updateItemNodes() + } + + private func makeNodeForItem(at index: Int) -> GalleryItemNode { + let node = self.items[index].node() + node.toggleControlsVisibility = self.toggleControlsVisibility + node.index = index + return node + } + + private func visibleItemNode(at index: Int) -> GalleryItemNode? { + for itemNode in self.itemNodes { + if itemNode.index == index { + return itemNode + } + } + return nil + } + + private func addVisibleItemNode(_ node: GalleryItemNode) { + var added = false + for i in 0 ..< self.itemNodes.count { + if node.index < self.itemNodes[i].index { + self.itemNodes.insert(node, at: i) + added = true + break + } + } + if !added { + self.itemNodes.append(node) + } + self.scrollView.addSubview(node.view) + } + + private func removeVisibleItemNode(internalIndex: Int) { + self.itemNodes[internalIndex].view.removeFromSuperview() + self.itemNodes.remove(at: internalIndex) + } + + private func updateItemNodes() { + if self.items.isEmpty { + return + } + + var resetOffsetToCentralItem = false + if self.itemNodes.isEmpty { + let node = self.makeNodeForItem(at: self.centralItemIndex ?? 
0) + node.frame = CGRect(origin: CGPoint(), size: scrollView.bounds.size) + if let containerLayout = self.containerLayout { + node.containerLayoutUpdated(containerLayout.0, navigationBarHeight: containerLayout.1, transition: .immediate) + } + self.addVisibleItemNode(node) + self.centralItemIndex = node.index + resetOffsetToCentralItem = true + } + + if let centralItemIndex = self.centralItemIndex, let centralItemNode = self.visibleItemNode(at: centralItemIndex) { + if centralItemIndex != 0 { + if self.visibleItemNode(at: centralItemIndex - 1) == nil { + let node = self.makeNodeForItem(at: centralItemIndex - 1) + node.frame = centralItemNode.frame.offsetBy(dx: -centralItemNode.frame.size.width - self.pageGap, dy: 0.0) + if let containerLayout = self.containerLayout { + node.containerLayoutUpdated(containerLayout.0, navigationBarHeight: containerLayout.1, transition: .immediate) + } + self.addVisibleItemNode(node) + } + } + + if centralItemIndex != items.count - 1 { + if self.visibleItemNode(at: centralItemIndex + 1) == nil { + let node = self.makeNodeForItem(at: centralItemIndex + 1) + node.frame = centralItemNode.frame.offsetBy(dx: centralItemNode.frame.size.width + self.pageGap, dy: 0.0) + if let containerLayout = self.containerLayout { + node.containerLayoutUpdated(containerLayout.0, navigationBarHeight: containerLayout.1, transition: .immediate) + } + self.addVisibleItemNode(node) + } + } + + for i in 0 ..< self.itemNodes.count { + self.itemNodes[i].frame = CGRect(origin: CGPoint(x: CGFloat(i) * self.scrollView.bounds.size.width + self.pageGap, y: 0.0), size: CGSize(width: self.scrollView.bounds.size.width - self.pageGap * 2.0, height: self.scrollView.bounds.size.height)) + } + + if resetOffsetToCentralItem { + self.scrollView.contentOffset = CGPoint(x: centralItemNode.frame.minX - self.pageGap, y: 0.0) + } + + if let centralItemCandidateNode = self.centralItemCandidate(), centralItemCandidateNode.index != centralItemIndex { + for i in (0 ..< 
self.itemNodes.count).reversed() { + let node = self.itemNodes[i] + if node.index < centralItemCandidateNode.index - 1 || node.index > centralItemCandidateNode.index + 1 { + self.removeVisibleItemNode(internalIndex: i) + } + } + + self.centralItemIndex = centralItemCandidateNode.index + + if centralItemCandidateNode.index != 0 { + if self.visibleItemNode(at: centralItemCandidateNode.index - 1) == nil { + let node = self.makeNodeForItem(at: centralItemCandidateNode.index - 1) + node.frame = centralItemCandidateNode.frame.offsetBy(dx: -centralItemCandidateNode.frame.size.width - self.pageGap, dy: 0.0) + if let containerLayout = self.containerLayout { + node.containerLayoutUpdated(containerLayout.0, navigationBarHeight: containerLayout.1, transition: .immediate) + } + self.addVisibleItemNode(node) + } + } + + if centralItemCandidateNode.index != items.count - 1 { + if self.visibleItemNode(at: centralItemCandidateNode.index + 1) == nil { + let node = self.makeNodeForItem(at: centralItemCandidateNode.index + 1) + node.frame = centralItemCandidateNode.frame.offsetBy(dx: centralItemCandidateNode.frame.size.width + self.pageGap, dy: 0.0) + if let containerLayout = self.containerLayout { + node.containerLayoutUpdated(containerLayout.0, navigationBarHeight: containerLayout.1, transition: .immediate) + } + self.addVisibleItemNode(node) + } + } + + let previousCentralCandidateHorizontalOffset = self.scrollView.contentOffset.x - centralItemCandidateNode.frame.minX + + for i in 0 ..< self.itemNodes.count { + self.itemNodes[i].frame = CGRect(origin: CGPoint(x: CGFloat(i) * self.scrollView.bounds.size.width + self.pageGap, y: 0.0), size: CGSize(width: self.scrollView.bounds.size.width - self.pageGap * 2.0, height: self.scrollView.bounds.size.height)) + } + + self.scrollView.contentOffset = CGPoint(x: centralItemCandidateNode.frame.minX + previousCentralCandidateHorizontalOffset, y: 0.0) + } + + self.scrollView.contentSize = CGSize(width: CGFloat(self.itemNodes.count) * 
self.scrollView.bounds.size.width, height: self.scrollView.bounds.size.height) + } else { + assertionFailure() + } + + for itemNode in self.itemNodes { + itemNode.centralityUpdated(isCentral: itemNode.index == self.centralItemIndex) + itemNode.visibilityUpdated(isVisible: self.scrollView.bounds.intersects(itemNode.frame)) + } + } + + func scrollViewDidScroll(_ scrollView: UIScrollView) { + self.updateItemNodes() + } + + private func centralItemCandidate() -> GalleryItemNode? { + let hotizontlOffset = self.scrollView.contentOffset.x + self.pageGap + var closestNodeAndDistance: (Int, CGFloat)? + for i in 0 ..< self.itemNodes.count { + let node = self.itemNodes[i] + let distance = abs(node.frame.minX - hotizontlOffset) + if let currentClosestNodeAndDistance = closestNodeAndDistance { + if distance < currentClosestNodeAndDistance.1 { + closestNodeAndDistance = (node.index, distance) + } + } else { + closestNodeAndDistance = (node.index, distance) + } + } + if let closestNodeAndDistance = closestNodeAndDistance { + return self.visibleItemNode(at: closestNodeAndDistance.0) + } else { + return nil + } + } +} diff --git a/TelegramUI/HorizontalPeerItem.swift b/TelegramUI/HorizontalPeerItem.swift new file mode 100644 index 0000000000..3e1ccd5300 --- /dev/null +++ b/TelegramUI/HorizontalPeerItem.swift @@ -0,0 +1,80 @@ +import Foundation +import Display +import Postbox +import AsyncDisplayKit +import TelegramCore + +final class HorizontalPeerItem: ListViewItem { + let account: Account + let peer: Peer + let action: (PeerId) -> Void + + init(account: Account, peer: Peer, action: @escaping (PeerId) -> Void) { + self.account = account + self.peer = peer + self.action = action + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = HorizontalPeerItemNode() + node.contentSize = 
CGSize(width: 92.0, height: 80.0) + node.insets = UIEdgeInsets(top: 0.0, left: 0.0, bottom: 0.0, right: 0.0) + node.update(account: self.account, peer: self.peer) + node.action = self.action + completion(node, { + }) + } + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + completion(ListViewItemNodeLayout(contentSize: node.contentSize, insets: node.insets), { + }) + } +} + +private final class HorizontalPeerItemNode: ListViewItemNode { + private let avatarNode: ChatListAvatarNode + private let titleNode: ASTextNode + private var peer: Peer? + fileprivate var action: ((PeerId) -> Void)? + + init() { + self.avatarNode = ChatListAvatarNode(font: Font.regular(14.0)) + //self.avatarNode.transform = CATransform3DMakeRotation(CGFloat(M_PI / 2.0), 0.0, 0.0, 1.0) + self.avatarNode.frame = CGRect(origin: CGPoint(x: floor((92.0 - 60.0) / 2.0), y: 4.0), size: CGSize(width: 60.0, height: 60.0)) + + self.titleNode = ASTextNode() + //self.titleNode.transform = CATransform3DMakeRotation(CGFloat(M_PI / 2.0), 0.0, 0.0, 1.0) + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.avatarNode) + self.addSubnode(self.titleNode) + } + + override func didLoad() { + super.didLoad() + + self.layer.sublayerTransform = CATransform3DMakeRotation(CGFloat(M_PI / 2.0), 0.0, 0.0, 1.0) + + self.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(self.tapGesture(_:)))) + } + + func update(account: Account, peer: Peer) { + self.peer = peer + self.avatarNode.setPeer(account: account, peer: peer) + self.titleNode.attributedText = NSAttributedString(string: peer.compactDisplayTitle, font: Font.regular(11.0), textColor: UIColor.black) + let titleSize = self.titleNode.measure(CGSize(width: 84.0, height: 
CGFloat.infinity)) + self.titleNode.frame = CGRect(origin: CGPoint(x: floor((92.0 - titleSize.width) / 2.0), y: 4.0 + 60.0 + 6.0), size: titleSize) + } + + @objc private func tapGesture(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + if let peer = self.peer, let action = self.action { + action(peer.id) + } + } + } +} + diff --git a/TelegramUI/ImageNode.swift b/TelegramUI/ImageNode.swift new file mode 100644 index 0000000000..ea99d01507 --- /dev/null +++ b/TelegramUI/ImageNode.swift @@ -0,0 +1,109 @@ +import Foundation +import AsyncDisplayKit +import SwiftSignalKit +import Display + +public let displayLinkDispatcher = DisplayLinkDispatcher() +private let dispatcher = displayLinkDispatcher + +public enum ImageCorner: Equatable { + case Corner(CGFloat) + case Tail(CGFloat) + + public var extendedInsets: CGSize { + switch self { + case .Tail: + return CGSize(width: 3.0, height: 0.0) + default: + return CGSize() + } + } +} + +public func ==(lhs: ImageCorner, rhs: ImageCorner) -> Bool { + switch lhs { + case let .Corner(lhsRadius): + switch rhs { + case let .Corner(rhsRadius) where abs(lhsRadius - rhsRadius) < CGFloat(FLT_EPSILON): + return true + default: + return false + } + case let .Tail(lhsRadius): + switch rhs { + case let .Tail(rhsRadius) where abs(lhsRadius - rhsRadius) < CGFloat(FLT_EPSILON): + return true + default: + return false + } + } +} + +public struct ImageCorners: Equatable { + public let topLeft: ImageCorner + public let topRight: ImageCorner + public let bottomLeft: ImageCorner + public let bottomRight: ImageCorner + + public init(radius: CGFloat) { + self.topLeft = .Corner(radius) + self.topRight = .Corner(radius) + self.bottomLeft = .Corner(radius) + self.bottomRight = .Corner(radius) + } + + public init(topLeft: ImageCorner, topRight: ImageCorner, bottomLeft: ImageCorner, bottomRight: ImageCorner) { + self.topLeft = topLeft + self.topRight = topRight + self.bottomLeft = bottomLeft + self.bottomRight = bottomRight + } 
+ + public init() { + self.init(topLeft: .Corner(0.0), topRight: .Corner(0.0), bottomLeft: .Corner(0.0), bottomRight: .Corner(0.0)) + } + + public var extendedEdges: UIEdgeInsets { + let left = self.bottomLeft.extendedInsets.width + let right = self.bottomRight.extendedInsets.width + + return UIEdgeInsets(top: 0.0, left: left, bottom: 0.0, right: right) + } +} + +public func ==(lhs: ImageCorners, rhs: ImageCorners) -> Bool { + return lhs.topLeft == rhs.topLeft && lhs.topRight == rhs.topRight && lhs.bottomLeft == rhs.bottomLeft && lhs.bottomRight == rhs.bottomRight +} + +public class ImageNode: ASDisplayNode { + private var disposable = MetaDisposable() + + override init() { + super.init() + } + + public func setSignal(_ signal: Signal) { + var first = true + self.disposable.set((signal |> deliverOnMainQueue).start(next: {[weak self] next in + dispatcher.dispatch { + if let strongSelf = self { + strongSelf.contents = next.cgImage + if first { + first = false + if strongSelf.isNodeLoaded { + strongSelf.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.18) + } + } + } + } + })) + } + + public override func clearContents() { + super.clearContents() + + self.contents = nil + self.disposable.set(nil) + } +} + diff --git a/TelegramUI/ImageRepresentationsUtils.swift b/TelegramUI/ImageRepresentationsUtils.swift new file mode 100644 index 0000000000..784919d36e --- /dev/null +++ b/TelegramUI/ImageRepresentationsUtils.swift @@ -0,0 +1,39 @@ +import TelegramCore + +func smallestImageRepresentation(_ representations: [TelegramMediaImageRepresentation]) -> TelegramMediaImageRepresentation? 
{ + if representations.count == 0 { + return nil + } else { + var dimensions = representations[0].dimensions + var index = 0 + + for i in 1 ..< representations.count { + let representationDimensions = representations[i].dimensions + if representationDimensions.width < dimensions.width && representationDimensions.height < dimensions.height { + dimensions = representationDimensions + index = i + } + } + + return representations[index] + } +} + +func largestImageRepresentation(_ representations: [TelegramMediaImageRepresentation]) -> TelegramMediaImageRepresentation? { + if representations.count == 0 { + return nil + } else { + var dimensions = representations[0].dimensions + var index = 0 + + for i in 1 ..< representations.count { + let representationDimensions = representations[i].dimensions + if representationDimensions.width > dimensions.width && representationDimensions.height > dimensions.height { + dimensions = representationDimensions + index = i + } + } + + return representations[index] + } +} diff --git a/TelegramUI/Info.plist b/TelegramUI/Info.plist index 9b89d8a15e..fbe1e6b314 100644 --- a/TelegramUI/Info.plist +++ b/TelegramUI/Info.plist @@ -4,8 +4,6 @@ CFBundleDevelopmentRegion en - CFBundleDisplayName - TelegramUI CFBundleExecutable $(EXECUTABLE_NAME) CFBundleIdentifier diff --git a/TelegramUI/ListController.swift b/TelegramUI/ListController.swift new file mode 100644 index 0000000000..395ba7d2dc --- /dev/null +++ b/TelegramUI/ListController.swift @@ -0,0 +1,30 @@ +import Foundation +import UIKit +import Display +import AsyncDisplayKit + +public class ListController: ViewController { + public var items: [ListControllerItem] = [] + + public var listDisplayNode: ListControllerNode { + get { + return super.displayNode as! 
ListControllerNode + } + } + + override public func loadDisplayNode() { + self.displayNode = ListControllerNode() + + self.displayNode.backgroundColor = UIColor(0xefeff4) + + self.listDisplayNode.listView.deleteAndInsertItems(deleteIndices: [], insertIndicesAndItems: (0 ..< self.items.count).map({ ListViewInsertItem(index: $0, previousIndex: nil, item: self.items[$0], directionHint: .Down) }), updateIndicesAndItems: [], options: []) + + self.displayNodeDidLoad() + } + + override public func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.listDisplayNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } +} diff --git a/TelegramUI/ListControllerButtonItem.swift b/TelegramUI/ListControllerButtonItem.swift new file mode 100644 index 0000000000..2a6d6442f4 --- /dev/null +++ b/TelegramUI/ListControllerButtonItem.swift @@ -0,0 +1,60 @@ +import Foundation +import UIKit +import Display + +private let titleFont = Font.regular(17.0) + +class ListControllerButtonItem: ListControllerGroupableItem { + fileprivate let title: String + fileprivate let action: () -> () + fileprivate let color: UIColor + + let selectable: Bool = true + + init(title: String, action: @escaping () -> (), color: UIColor = .blue) { + self.title = title + self.action = action + self.color = color + } + + func setupNode(async: @escaping (@escaping () -> Void) -> Void, completion: @escaping (ListControllerGroupableItemNode) -> Void) { + let node = ListControllerButtonItemNode() + completion(node) + } + + func selected() { + self.action() + } +} + +class ListControllerButtonItemNode: ListControllerGroupableItemNode { + let label: TextNode + + override init() { + self.label = TextNode() + + super.init() + + self.label.isLayerBacked = true + self.addSubnode(self.label) + } + + override func asyncLayoutContent() -> (_ item: 
ListControllerGroupableItem, _ width: CGFloat) -> (CGSize, () -> Void) { + let layoutLabel = TextNode.asyncLayout(self.label) + return { item, width in + if let item = item as? ListControllerButtonItem { + let (labelLayout, labelApply) = layoutLabel(NSAttributedString(string: item.title, font: titleFont, textColor: item.color), nil, 1, .end, CGSize(width: width - 20, height: CGFloat.greatestFiniteMagnitude), nil) + return (CGSize(width: width, height: 44.0), { [weak self] in + if let strongSelf = self { + let _ = labelApply() + + strongSelf.label.frame = CGRect(origin: CGPoint(x: 16.0, y: floorToScreenPixels((44.0 - labelLayout.size.height) / 2.0)), size: labelLayout.size) + } + }) + } else { + return (CGSize(width: width, height: 0.0), { + }) + } + } + } +} diff --git a/TelegramUI/ListControllerDisclosureActionItem.swift b/TelegramUI/ListControllerDisclosureActionItem.swift new file mode 100644 index 0000000000..f464e6eeb0 --- /dev/null +++ b/TelegramUI/ListControllerDisclosureActionItem.swift @@ -0,0 +1,80 @@ +import Foundation +import UIKit +import Display +import AsyncDisplayKit + +private let titleFont = Font.regular(17.0) + +private func generateDisclosureIconImage(color: UIColor) -> UIImage? { + return generateImage(CGSize(width: 8.0, height: 14.0), contextGenerator: { size, context -> Void in + context.clear(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(color.cgColor) + let _ = try? 
drawSvgPath(context, path: "M6.36396103,7.4746212 L7.4246212,6.41396103 L1.06066017,0.0500000007 L0,1.11066017 L6.36396103,7.4746212 Z M1.06066017,12.9697384 L7.4246212,6.60577736 L6.36396103,5.54511719 L0,11.9090782 L1.06066017,12.9697384 L1.06066017,12.9697384 Z") + }) +} + +private let disclosureIconImage = generateDisclosureIconImage(color: UIColor(0xc6c6ca)) + +class ListControllerDisclosureActionItem: ListControllerGroupableItem { + fileprivate let title: String + private let action: () -> () + + let selectable: Bool = true + + init(title: String, action: @escaping () -> ()) { + self.title = title + self.action = action + } + + func setupNode(async: @escaping (@escaping () -> Void) -> Void, completion: @escaping (ListControllerGroupableItemNode) -> Void) { + let node = ListControllerDisclosureActionItemNode() + completion(node) + } + + func selected() { + self.action() + } +} + +class ListControllerDisclosureActionItemNode: ListControllerGroupableItemNode { + let label: TextNode + let disclosureIcon: ASDisplayNode + + override init() { + self.label = TextNode() + self.label.isLayerBacked = true + + self.disclosureIcon = ASDisplayNode() + if let disclosureIconImage = disclosureIconImage { + self.disclosureIcon.frame = CGRect(origin: CGPoint(), size: disclosureIconImage.size) + self.disclosureIcon.contents = disclosureIconImage.cgImage + } + self.disclosureIcon.isLayerBacked = true + + super.init() + + self.addSubnode(self.label) + self.addSubnode(self.disclosureIcon) + } + + override func asyncLayoutContent() -> (_ item: ListControllerGroupableItem, _ width: CGFloat) -> (CGSize, () -> Void) { + let layoutLabel = TextNode.asyncLayout(self.label) + return { item, width in + if let item = item as? 
ListControllerDisclosureActionItem { + let (labelLayout, labelApply) = layoutLabel(NSAttributedString(string: item.title, font: titleFont, textColor: UIColor.black), nil, 1, .end, CGSize(width: width - 20, height: CGFloat.greatestFiniteMagnitude), nil) + return (CGSize(width: width, height: 44.0), { [weak self] in + if let strongSelf = self { + let _ = labelApply() + let disclosureSize = strongSelf.disclosureIcon.bounds.size + strongSelf.disclosureIcon.frame = CGRect(origin: CGPoint(x: width - 15.0 - disclosureSize.width, y: floorToScreenPixels((44.0 - disclosureSize.height) / 2.0)), size: disclosureSize) + + strongSelf.label.frame = CGRect(origin: CGPoint(x: 16.0, y: floorToScreenPixels((44.0 - labelLayout.size.height) / 2.0 + 0.5)), size: labelLayout.size) + } + }) + } else { + return (CGSize(width: width, height: 0.0), { + }) + } + } + } +} diff --git a/TelegramUI/ListControllerGroupableItem.swift b/TelegramUI/ListControllerGroupableItem.swift new file mode 100644 index 0000000000..6763daf7a0 --- /dev/null +++ b/TelegramUI/ListControllerGroupableItem.swift @@ -0,0 +1,143 @@ +import Foundation +import UIKit +import Display +import AsyncDisplayKit +import SwiftSignalKit + +private let separatorHeight = 1.0 / UIScreen.main.scale + +protocol ListControllerGroupableItem: ListControllerItem { + func setupNode(async: @escaping (@escaping () -> Void) -> Void, completion: @escaping (ListControllerGroupableItemNode) -> Void) +} + +extension ListControllerGroupableItem { + func nodeConfiguredForWidth(async: @escaping (@escaping() -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping(ListViewItemNode, @escaping() -> Void) -> Void) { + self.setupNode(async: async, completion: { node in + let asyncLayout = node.asyncLayout() + let (layout, apply) = asyncLayout(self, width, previousItem is ListControllerGroupableItem, nextItem is ListControllerGroupableItem) + node.contentSize = layout.contentSize + node.insets = 
layout.insets + completion(node, apply) + }) + } + + func updateNode(async: @escaping (@escaping () -> Void) -> Void, node: ListViewItemNode, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, animation: ListViewItemUpdateAnimation, completion: @escaping (ListViewItemNodeLayout, @escaping () -> Void) -> Void) { + if let node = node as? ListControllerGroupableItemNode { + Queue.mainQueue().async { + let asyncLayout = node.asyncLayout() + async { + let (layout, apply) = asyncLayout(self, width, previousItem is ListControllerGroupableItem, nextItem is ListControllerGroupableItem) + Queue.mainQueue().async { + completion(layout, apply) + } + } + } + } + } +} + +class ListControllerGroupableItemNode: ListViewItemNode { + private let backgroundNode: ASDisplayNode + private let highlightedBackgroundNode: ASDisplayNode + + private let topStripeNode: ASDisplayNode + private let bottomStripeNode: ASDisplayNode + + init() { + self.backgroundNode = ASDisplayNode() + self.backgroundNode.backgroundColor = UIColor.white + self.backgroundNode.isLayerBacked = true + + self.highlightedBackgroundNode = ASDisplayNode() + self.highlightedBackgroundNode.backgroundColor = UIColor(0xd9d9d9) + self.highlightedBackgroundNode.isLayerBacked = true + + self.topStripeNode = ASDisplayNode() + self.topStripeNode.backgroundColor = UIColor(0xc8c7cc) + self.topStripeNode.isLayerBacked = true + + self.bottomStripeNode = ASDisplayNode() + self.bottomStripeNode.backgroundColor = UIColor(0xc8c7cc) + self.bottomStripeNode.isLayerBacked = true + + super.init(layerBacked: false, dynamicBounce: false) + + self.addSubnode(self.backgroundNode) + self.addSubnode(self.topStripeNode) + self.addSubnode(self.bottomStripeNode) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + if let item = item as? 
ListControllerGroupableItem { + let layout = self.asyncLayout() + let (_, apply) = layout(item, width, previousItem is ListControllerGroupableItem, nextItem is ListControllerGroupableItem) + apply() + } + } + + func updateBackgroundAndSeparatorsLayout(groupBottom: Bool) { + let size = self.bounds.size + let insets = self.insets + + self.backgroundNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -insets.top), size: CGSize(width: size.width, height: size.height)) + self.highlightedBackgroundNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -separatorHeight), size: CGSize(width: size.width, height: size.height + separatorHeight - insets.top)) + self.topStripeNode.frame = CGRect(origin: CGPoint(x: 0.0, y: -insets.top), size: CGSize(width: size.width, height: separatorHeight)) + let bottomStripeInset: CGFloat = groupBottom ? 16.0 : 0.0 + self.bottomStripeNode.frame = CGRect(origin: CGPoint(x: bottomStripeInset, y: size.height - insets.top - separatorHeight), size: CGSize(width: size.width - bottomStripeInset, height: separatorHeight)) + } + + func asyncLayoutContent() -> (_ item: ListControllerGroupableItem, _ width: CGFloat) -> (CGSize, () -> Void) { + return { _, width in + return (CGSize(width: width, height: 0.0), { + }) + } + } + + fileprivate func asyncLayout() -> (_ item: ListControllerGroupableItem, _ width: CGFloat, _ groupedTop: Bool, _ groupedBottom: Bool) -> (ListViewItemNodeLayout, () -> Void) { + let contentLayout = self.asyncLayoutContent() + + return { item, width, groupedTop, groupedBottom in + let (contentSize, contentApply) = contentLayout(item, width) + + let insets = UIEdgeInsets(top: groupedTop ? 
0.0 : separatorHeight, left: 0.0, bottom: separatorHeight, right: 0.0) + let layout = ListViewItemNodeLayout(contentSize: CGSize(width: width, height: contentSize.height), insets: insets) + + return (layout, { [weak self] in + if let strongSelf = self { + contentApply() + + strongSelf.topStripeNode.isHidden = groupedTop + strongSelf.contentSize = layout.contentSize + strongSelf.insets = layout.insets + strongSelf.updateBackgroundAndSeparatorsLayout(groupBottom: groupedBottom) + } + }) + } + } + + override func setHighlighted(_ highlighted: Bool, animated: Bool) { + super.setHighlighted(highlighted, animated: animated) + + if highlighted { + self.highlightedBackgroundNode.alpha = 1.0 + if self.highlightedBackgroundNode.supernode == nil { + self.insertSubnode(self.highlightedBackgroundNode, aboveSubnode: self.bottomStripeNode) + } + } else { + if self.highlightedBackgroundNode.supernode != nil { + if animated { + self.highlightedBackgroundNode.layer.animateAlpha(from: self.highlightedBackgroundNode.alpha, to: 0.0, duration: 0.4, completion: { [weak self] completed in + if let strongSelf = self { + if completed { + strongSelf.highlightedBackgroundNode.removeFromSupernode() + } + } + }) + self.highlightedBackgroundNode.alpha = 0.0 + } else { + self.highlightedBackgroundNode.removeFromSupernode() + } + } + } + } +} diff --git a/TelegramUI/ListControllerItem.swift b/TelegramUI/ListControllerItem.swift new file mode 100644 index 0000000000..db2871d041 --- /dev/null +++ b/TelegramUI/ListControllerItem.swift @@ -0,0 +1,4 @@ +import Display + +public protocol ListControllerItem: ListViewItem { +} diff --git a/TelegramUI/ListControllerNode.swift b/TelegramUI/ListControllerNode.swift new file mode 100644 index 0000000000..b4ae02b8ae --- /dev/null +++ b/TelegramUI/ListControllerNode.swift @@ -0,0 +1,39 @@ +import Foundation +import AsyncDisplayKit +import Display + +public class ListControllerNode: ASDisplayNode { + let listView: ListView + + override init() { + self.listView = 
ListView() + + super.init() + + self.addSubnode(self.listView) + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + var duration: Double = 0.0 + var curve: UInt = 0 + switch transition { + case .immediate: + break + case let .animated(animationDuration, animationCurve): + duration = animationDuration + switch animationCurve { + case .easeInOut: + break + case .spring: + curve = 7 + } + } + + var insets = layout.insets(options: [.input]) + insets.top += navigationBarHeight + + self.listView.bounds = CGRect(x: 0.0, y: 0.0, width: layout.size.width, height: layout.size.height) + self.listView.position = CGPoint(x: layout.size.width / 2.0, y: layout.size.height / 2.0) + self.listView.updateSizeAndInsets(size: layout.size, insets: UIEdgeInsets(top: insets.top, left: insets.left, bottom: insets.bottom, right: insets.right), duration: duration, options: UIViewAnimationOptions(rawValue: curve << 16)) + } +} diff --git a/TelegramUI/ListControllerSpacerItem.swift b/TelegramUI/ListControllerSpacerItem.swift new file mode 100644 index 0000000000..e1f9795307 --- /dev/null +++ b/TelegramUI/ListControllerSpacerItem.swift @@ -0,0 +1,35 @@ +import Foundation +import Display + +class ListControllerSpacerItem: ListControllerItem { + private let height: CGFloat + + init(height: CGFloat) { + self.height = height + } + + func mergesBackgroundWithItem(other: ListControllerItem) -> Bool { + return false + } + + func nodeConfiguredForWidth(async: @escaping (@escaping () -> Void) -> Void, width: CGFloat, previousItem: ListViewItem?, nextItem: ListViewItem?, completion: @escaping (ListViewItemNode, @escaping () -> Void) -> Void) { + async { + let node = ListControllerSpacerItemNode() + node.height = self.height + node.layoutForWidth(width, item: self, previousItem: previousItem, nextItem: nextItem) + completion(node, {}) + } + } +} + +class ListControllerSpacerItemNode: ListViewItemNode { + var height: 
CGFloat = 0.0 + + init() { + super.init(layerBacked: true, dynamicBounce: false) + } + + override func layoutForWidth(_ width: CGFloat, item: ListViewItem, previousItem: ListViewItem?, nextItem: ListViewItem?) { + self.frame = CGRect(origin: CGPoint(), size: CGSize(width: width, height: self.height)) + } +} diff --git a/TelegramUI/ListSectionHeaderNode.swift b/TelegramUI/ListSectionHeaderNode.swift new file mode 100644 index 0000000000..1f33bed101 --- /dev/null +++ b/TelegramUI/ListSectionHeaderNode.swift @@ -0,0 +1,35 @@ +import Foundation +import AsyncDisplayKit +import Display + +final class ListSectionHeaderNode: ASDisplayNode { + private let label: TextNode + + var title: String? { + didSet { + self.calculatedLayoutDidChange() + self.setNeedsLayout() + } + } + + override init() { + self.label = TextNode() + self.label.isLayerBacked = true + self.label.isOpaque = true + + super.init() + + self.addSubnode(self.label) + + self.backgroundColor = UIColor(0xf7f7f7) + } + + override func layout() { + let size = self.bounds.size + + let makeLayout = TextNode.asyncLayout(self.label) + let (labelLayout, labelApply) = makeLayout(NSAttributedString(string: self.title ?? 
"", font: Font.medium(12.0), textColor: UIColor(0x8e8e93)), self.backgroundColor, 1, .end, CGSize(width: max(0.0, size.width - 18.0), height: size.height), nil) + let _ = labelApply() + self.label.frame = CGRect(origin: CGPoint(x: 9.0, y: 6.0), size: labelLayout.size) + } +} diff --git a/TelegramUI/Localizable.swift b/TelegramUI/Localizable.swift new file mode 100644 index 0000000000..f7298d61f8 --- /dev/null +++ b/TelegramUI/Localizable.swift @@ -0,0 +1,73 @@ +// Generated using SwiftGen, by O.Halligon — https://github.com/AliSoftware/SwiftGen + +import Foundation + +enum L10n { + /// Group Created + case ChatServiceGroupCreated + /// %@ invited %@ + case ChatServiceGroupAddedMembers(String, String) + /// %@ joined group + case ChatServiceGroupAddedSelf(String) + /// %@ kicked %@ + case ChatServiceGroupRemovedMembers(String, String) + /// %@ left group + case ChatServiceGroupRemovedSelf(String) + /// %@ updated group photo + case ChatServiceGroupUpdatedPhoto(String) + /// %@ removed group photo + case ChatServiceGroupRemovedPhoto(String) + /// %@ renamed group to \"%@\" + case ChatServiceGroupUpdatedTitle(String, String) + /// %@ pinned %@ + case ChatServiceGroupUpdatedPinnedMessage(String, String) + /// %@ removed pinned message + case ChatServiceGroupRemovedPinnedMessage(String) + /// %@ joined group via invite link + case ChatServiceGroupJoinedByLink(String) + /// The group was upgraded to a supergroup + case ChatServiceGroupMigratedToSupergroup +} + +extension L10n: CustomStringConvertible { + var description: String { return self.string } + + var string: String { + switch self { + case .ChatServiceGroupCreated: + return L10n.tr("Chat.Service.Group.Created") + case .ChatServiceGroupAddedMembers(let p0, let p1): + return L10n.tr("Chat.Service.Group.AddedMembers", p0, p1) + case .ChatServiceGroupAddedSelf(let p0): + return L10n.tr("Chat.Service.Group.AddedSelf", p0) + case .ChatServiceGroupRemovedMembers(let p0, let p1): + return 
L10n.tr("Chat.Service.Group.RemovedMembers", p0, p1) + case .ChatServiceGroupRemovedSelf(let p0): + return L10n.tr("Chat.Service.Group.RemovedSelf", p0) + case .ChatServiceGroupUpdatedPhoto(let p0): + return L10n.tr("Chat.Service.Group.UpdatedPhoto", p0) + case .ChatServiceGroupRemovedPhoto(let p0): + return L10n.tr("Chat.Service.Group.RemovedPhoto", p0) + case .ChatServiceGroupUpdatedTitle(let p0, let p1): + return L10n.tr("Chat.Service.Group.UpdatedTitle", p0, p1) + case .ChatServiceGroupUpdatedPinnedMessage(let p0, let p1): + return L10n.tr("Chat.Service.Group.UpdatedPinnedMessage", p0, p1) + case .ChatServiceGroupRemovedPinnedMessage(let p0): + return L10n.tr("Chat.Service.Group.RemovedPinnedMessage", p0) + case .ChatServiceGroupJoinedByLink(let p0): + return L10n.tr("Chat.Service.Group.JoinedByLink", p0) + case .ChatServiceGroupMigratedToSupergroup: + return L10n.tr("Chat.Service.Group.MigratedToSupergroup") + } + } + + private static func tr(_ key: String, _ args: CVarArg...) -> String { + let format = NSLocalizedString(key, comment: "") + return String(format: format, locale: Locale.current, arguments: args) + } +} + +func tr(_ key: L10n) -> String { + return key.string +} + diff --git a/TelegramUI/MapInputController.swift b/TelegramUI/MapInputController.swift new file mode 100644 index 0000000000..afcbd7bfb2 --- /dev/null +++ b/TelegramUI/MapInputController.swift @@ -0,0 +1,62 @@ +import Foundation +import Display +import AsyncDisplayKit +import UIKit +import SwiftSignalKit + +final class MapInputController: ViewController { + private let _ready = Promise() + override var ready: Promise { + return self._ready + } + private var didSetReady = false + + var mapInputNode: MapInputControllerNode { + get { + return super.displayNode as! 
MapInputControllerNode + } + } + + override init() { + super.init() + + self._ready.set(.single(true)) + + /*self.statusBar.style = .White + self.navigationBar.backgroundColor = UIColor(white: 0.0, alpha: 0.9) + self.navigationBar.foregroundColor = UIColor.white() + self.navigationBar.accentColor = UIColor.white() + self.navigationBar.stripeColor = UIColor.black()*/ + + self.navigationItem.title = "Location" + self.navigationItem.leftBarButtonItem = UIBarButtonItem(title: "Cancel", style: .plain, target: self, action: #selector(self.cancelPressed)) + } + + required init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + @objc func cancelPressed() { + self.mapInputNode.animateOut() + } + + override func loadDisplayNode() { + self.displayNode = MapInputControllerNode() + + self.mapInputNode.dismiss = { [weak self] in + self?.presentingViewController?.dismiss(animated: true, completion: nil) + } + } + + override func viewDidAppear(_ animated: Bool) { + super.viewDidAppear(animated) + + self.mapInputNode.animateIn() + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, transition: transition) + + self.mapInputNode.containerLayoutUpdated(layout, navigationBarHeight: self.navigationBar.frame.maxY, transition: transition) + } +} diff --git a/TelegramUI/MapInputControllerNode.swift b/TelegramUI/MapInputControllerNode.swift new file mode 100644 index 0000000000..fc100e8d81 --- /dev/null +++ b/TelegramUI/MapInputControllerNode.swift @@ -0,0 +1,67 @@ +import Foundation +import Display +import AsyncDisplayKit +import MapKit + +private var previousUserLocation: CLLocation? 
+ +final class MapInputControllerNode: ASDisplayNode, MKMapViewDelegate { + var dismiss: () -> Void = { } + + let locationManager: CLLocationManager + let mapView: MKMapView + + override init() { + self.locationManager = CLLocationManager() + self.mapView = MKMapView() + + super.init(viewBlock: { + return UITracingLayerView() + }, didLoad: nil) + + self.backgroundColor = UIColor.white + + self.mapView.delegate = self + self.view.addSubview(self.mapView) + + if let location = previousUserLocation { + let coordinateRegion = MKCoordinateRegionMakeWithDistance(location.coordinate, 1000.0 * 2.0, 1000.0 * 2.0) + mapView.setRegion(coordinateRegion, animated: true) + } + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + self.mapView.frame = CGRect(origin: CGPoint(), size: layout.size) + } + + func animateIn() { + self.checkLocationAuthorizationStatus() + + self.layer.animatePosition(from: CGPoint(x: self.layer.position.x, y: self.layer.position.y + self.layer.bounds.size.height), to: self.layer.position, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring) + } + + func animateOut() { + self.layer.animatePosition(from: self.layer.position, to: CGPoint(x: self.layer.position.x, y: self.layer.position.y + self.layer.bounds.size.height), duration: 0.2, timingFunction: kCAMediaTimingFunctionEaseInEaseOut, removeOnCompletion: false, completion: { [weak self] _ in + if let strongSelf = self { + strongSelf.dismiss() + } + }) + } + + private func checkLocationAuthorizationStatus() { + if CLLocationManager.authorizationStatus() == .authorizedWhenInUse { + mapView.showsUserLocation = true + } else { + locationManager.requestWhenInUseAuthorization() + } + } + + func mapView(_ mapView: MKMapView, didUpdate userLocation: MKUserLocation) { + if let location = userLocation.location { + previousUserLocation = location + + let coordinateRegion = 
MKCoordinateRegionMakeWithDistance(location.coordinate, 1000.0 * 2.0, 1000.0 * 2.0) + mapView.setRegion(coordinateRegion, animated: true) + } + } +} diff --git a/TelegramUI/MediaFrameSource.swift b/TelegramUI/MediaFrameSource.swift new file mode 100644 index 0000000000..413eacdd6c --- /dev/null +++ b/TelegramUI/MediaFrameSource.swift @@ -0,0 +1,23 @@ +import Foundation +import SwiftSignalKit +import CoreMedia + +enum MediaTrackEvent { + case frames([MediaTrackDecodableFrame]) +} + +struct MediaFrameSourceSeekResult { + let buffers: MediaPlaybackBuffers + let timestamp: CMTime +} + +enum MediaFrameSourceSeekError { + case generic +} + +protocol MediaFrameSource { + func addEventSink(_ f: @escaping (MediaTrackEvent) -> Void) -> Int + func removeEventSink(_ index: Int) + func generateFrames(until timestamp: Double) + func seek(timestamp: Double) -> Signal +} diff --git a/TelegramUI/MediaManager.swift b/TelegramUI/MediaManager.swift new file mode 100644 index 0000000000..979481d462 --- /dev/null +++ b/TelegramUI/MediaManager.swift @@ -0,0 +1,22 @@ +import Foundation +import SwiftSignalKit +import Postbox +import AVFoundation +import MobileCoreServices +import TelegramCore + +final class MediaManager { + let queue = Queue() +} + +//private var globalPlayer: AudioStreamPlayer? + +func debugPlayMedia(account: Account, file: TelegramMediaFile) { + /*globalPlayer = nil + let player = AudioStreamPlayer(account: account, resource: CloudFileMediaResource(location: file.location, size: file.size)) + globalPlayer = player*/ + + /*let player = STKAudioPlayer() + player.play("http://www.stephaniequinn.com/Music/Canon.mp3") + testPlayer = player*/ +} diff --git a/TelegramUI/MediaPlaybackData.swift b/TelegramUI/MediaPlaybackData.swift new file mode 100644 index 0000000000..3b0f44758f --- /dev/null +++ b/TelegramUI/MediaPlaybackData.swift @@ -0,0 +1,6 @@ +import Foundation + +struct MediaPlaybackBuffers { + let audioBuffer: MediaTrackFrameBuffer? 
+ let videoBuffer: MediaTrackFrameBuffer? +} diff --git a/TelegramUI/MediaPlayer.swift b/TelegramUI/MediaPlayer.swift new file mode 100644 index 0000000000..262ec8655e --- /dev/null +++ b/TelegramUI/MediaPlayer.swift @@ -0,0 +1,516 @@ +import Foundation +import SwiftSignalKit +import Postbox +import CoreMedia +import TelegramCore + +private struct MediaPlayerControlTimebase { + let timebase: CMTimebase + let isAudio: Bool +} + +private enum MediaPlayerPlaybackAction { + case play + case pause +} + +private struct MediaPlayerLoadedState { + fileprivate let frameSource: MediaFrameSource + fileprivate let mediaBuffers: MediaPlaybackBuffers + fileprivate let controlTimebase: MediaPlayerControlTimebase +} + +private enum MediaPlayerState { + case empty + case seeking(frameSource: MediaFrameSource, timestamp: Double, disposable: Disposable, action: MediaPlayerPlaybackAction) + case paused(MediaPlayerLoadedState) + case playing(MediaPlayerLoadedState) +} + +private final class MediaPlayerContext { + private let queue: Queue + private let account: Account + private let resource: MediaResource + + private var state: MediaPlayerState = .empty + private var audioRenderer: MediaPlayerAudioRenderer? + + private var tickTimer: SwiftSignalKit.Timer? + + fileprivate var status = Promise() + + fileprivate var playerNode: MediaPlayerNode? { + didSet { + if let playerNode = self.playerNode { + var controlTimebase: CMTimebase? 
+ + switch self.state { + case let .paused(loadedState): + controlTimebase = loadedState.controlTimebase.timebase + case let .playing(loadedState): + controlTimebase = loadedState.controlTimebase.timebase + case .empty, .seeking: + break + } + if let controlTimebase = controlTimebase { + DispatchQueue.main.async { + playerNode.controlTimebase = controlTimebase + } + } + } + } + } + + init(queue: Queue, account: Account, resource: MediaResource) { + assert(queue.isCurrent()) + + self.queue = queue + self.account = account + self.resource = resource + } + + deinit { + assert(self.queue.isCurrent()) + + self.tickTimer?.invalidate() + + if case let .seeking(_, _, disposable, _) = self.state { + disposable.dispose() + } + } + + fileprivate func seek(timestamp: Double) { + let action: MediaPlayerPlaybackAction + switch self.state { + case .empty, .paused: + action = .pause + case .playing: + action = .play + case let .seeking(_, _, _, currentAction): + action = currentAction + } + self.seek(timestamp: timestamp, action: action) + } + + fileprivate func seek(timestamp: Double, action: MediaPlayerPlaybackAction) { + assert(self.queue.isCurrent()) + + var loadedState: MediaPlayerLoadedState? 
+ switch self.state { + case .empty: + break + case let .playing(currentLoadedState): + loadedState = currentLoadedState + case let .paused(currentLoadedState): + loadedState = currentLoadedState + case let .seeking(previousFrameSource, previousTimestamp, previousDisposable, _): + if previousTimestamp.isEqual(to: timestamp) { + self.state = .seeking(frameSource: previousFrameSource, timestamp: previousTimestamp, disposable: previousDisposable, action: action) + return + } else { + previousDisposable.dispose() + } + } + + self.tickTimer?.invalidate() + if let loadedState = loadedState { + if loadedState.controlTimebase.isAudio { + self.audioRenderer?.rate = 0.0 + } else { + if !CMTimebaseGetRate(loadedState.controlTimebase.timebase).isEqual(to: 0.0) { + CMTimebaseSetRate(loadedState.controlTimebase.timebase, 0.0) + } + } + } + + let frameSource = FFMpegMediaFrameSource(queue: self.queue, account: account, resource: resource) + let disposable = MetaDisposable() + self.state = .seeking(frameSource: frameSource, timestamp: timestamp, disposable: disposable, action: action) + + let seekResult = frameSource.seek(timestamp: timestamp) |> deliverOn(self.queue) + + disposable.set(seekResult.start(next: { [weak self] seekResult in + if let strongSelf = self { + strongSelf.seekingCompleted(seekResult: seekResult) + } + }, error: { _ in + })) + } + + fileprivate func seekingCompleted(seekResult: MediaFrameSourceSeekResult) { + print("seekingCompleted at \(CMTimeGetSeconds(seekResult.timestamp))") + + assert(self.queue.isCurrent()) + + guard case let .seeking(frameSource, _, _, action) = self.state else { + assertionFailure() + return + } + + seekResult.buffers.audioBuffer?.statusUpdated = { [weak self] in + self?.tick() + } + seekResult.buffers.videoBuffer?.statusUpdated = { [weak self] in + self?.tick() + } + let controlTimebase: MediaPlayerControlTimebase + + if let _ = seekResult.buffers.audioBuffer { + let renderer: MediaPlayerAudioRenderer + if let currentRenderer = 
self.audioRenderer { + renderer = currentRenderer + } else { + renderer = MediaPlayerAudioRenderer() + self.audioRenderer = renderer + } + + controlTimebase = MediaPlayerControlTimebase(timebase: renderer.audioTimebase, isAudio: true) + } else { + self.audioRenderer?.stop() + self.audioRenderer = nil + + var timebase: CMTimebase? + CMTimebaseCreateWithMasterClock(nil, CMClockGetHostTimeClock(), &timebase) + controlTimebase = MediaPlayerControlTimebase(timebase: timebase!, isAudio: false) + CMTimebaseSetTime(timebase!, seekResult.timestamp) + } + + let loadedState = MediaPlayerLoadedState(frameSource: frameSource, mediaBuffers: seekResult.buffers, controlTimebase: controlTimebase) + + if let audioRenderer = self.audioRenderer { + let queue = self.queue + audioRenderer.flushBuffers(at: seekResult.timestamp, completion: { [weak self] in + queue.async { [weak self] in + if let strongSelf = self { + if let playerNode = strongSelf.playerNode { + let queue = strongSelf.queue + + DispatchQueue.main.async { + playerNode.reset() + playerNode.controlTimebase = controlTimebase.timebase + + queue.async { [weak self] in + if let strongSelf = self { + switch action { + case .play: + strongSelf.state = .playing(loadedState) + strongSelf.audioRenderer?.start() + case .pause: + strongSelf.state = .paused(loadedState) + } + + strongSelf.tick() + } + } + } + } else { + switch action { + case .play: + strongSelf.state = .playing(loadedState) + strongSelf.audioRenderer?.start() + case .pause: + strongSelf.state = .paused(loadedState) + } + + strongSelf.tick() + } + } + } + }) + } else { + if let playerNode = self.playerNode { + let queue = self.queue + + DispatchQueue.main.async { + playerNode.reset() + playerNode.controlTimebase = controlTimebase.timebase + + queue.async { [weak self] in + if let strongSelf = self { + switch action { + case .play: + strongSelf.state = .playing(loadedState) + case .pause: + strongSelf.state = .paused(loadedState) + } + + strongSelf.tick() + } + } + } + 
} + } + } + + fileprivate func play() { + assert(self.queue.isCurrent()) + + switch self.state { + case .empty: + self.seek(timestamp: 0.0, action: .play) + case let .seeking(frameSource, timestamp, disposable, _): + self.state = .seeking(frameSource: frameSource, timestamp: timestamp, disposable: disposable, action: .play) + case let .paused(loadedState): + self.state = .playing(loadedState) + self.tick() + case .playing: + break + } + } + + fileprivate func pause() { + assert(self.queue.isCurrent()) + + switch self.state { + case .empty: + break + case let .seeking(frameSource, timestamp, disposable, _): + self.state = .seeking(frameSource: frameSource, timestamp: timestamp, disposable: disposable, action: .pause) + case .paused: + break + case let .playing(loadedState): + self.state = .paused(loadedState) + self.tick() + } + } + + private func tick() { + self.tickTimer?.invalidate() + + var maybeLoadedState: MediaPlayerLoadedState? + + switch self.state { + case .empty: + return + case let .paused(state): + maybeLoadedState = state + case let .playing(state): + maybeLoadedState = state + case .seeking: + return + } + + guard let loadedState = maybeLoadedState else { + return + } + + let timestamp = CMTimeGetSeconds(CMTimebaseGetTime(loadedState.controlTimebase.timebase)) + print("tick at \(timestamp)") + + var duration: Double = 0.0 + var videoStatus: MediaTrackFrameBufferStatus? + if let videoTrackFrameBuffer = loadedState.mediaBuffers.videoBuffer { + videoStatus = videoTrackFrameBuffer.status(at: timestamp) + duration = max(duration, CMTimeGetSeconds(videoTrackFrameBuffer.duration)) + } + + var audioStatus: MediaTrackFrameBufferStatus? + if let audioTrackFrameBuffer = loadedState.mediaBuffers.audioBuffer { + audioStatus = audioTrackFrameBuffer.status(at: timestamp) + duration = max(duration, CMTimeGetSeconds(audioTrackFrameBuffer.duration)) + } + + var worstStatus: MediaTrackFrameBufferStatus? 
+ for status in [videoStatus, audioStatus] { + if let status = status { + if let worst = worstStatus { + switch status { + case .buffering: + worstStatus = status + case let .full(currentFullUntil): + switch worst { + case .buffering: + worstStatus = worst + case let .full(worstFullUntil): + if currentFullUntil < worstFullUntil { + worstStatus = status + } else { + worstStatus = worst + } + case .finished: + worstStatus = status + } + case let .finished(currentFinishedAt): + switch worst { + case .buffering, .full: + worstStatus = worst + case let .finished(worstFinishedAt): + if currentFinishedAt < worstFinishedAt { + worstStatus = worst + } else { + worstStatus = status + } + } + } + } else { + worstStatus = status + } + } + } + + let rate: Double + var buffering = false + + if let worstStatus = worstStatus, case let .full(fullUntil) = worstStatus, fullUntil.isFinite { + let nextTickDelay = max(0.0, fullUntil - timestamp) + let tickTimer = SwiftSignalKit.Timer(timeout: nextTickDelay, repeat: false, completion: { [weak self] in + self?.tick() + }, queue: self.queue) + self.tickTimer = tickTimer + tickTimer.start() + + if case .playing = self.state { + rate = 1.0 + } else { + rate = 0.0 + } + } else if let worstStatus = worstStatus, case let .finished(finishedAt) = worstStatus, finishedAt.isFinite { + let nextTickDelay = max(0.0, finishedAt - timestamp) + if nextTickDelay.isLessThanOrEqualTo(0.0) { + rate = 0.0 + } else { + let tickTimer = SwiftSignalKit.Timer(timeout: nextTickDelay, repeat: false, completion: { [weak self] in + self?.tick() + }, queue: self.queue) + self.tickTimer = tickTimer + tickTimer.start() + + if case .playing = self.state { + rate = 1.0 + } else { + rate = 0.0 + } + } + } else { + buffering = true + rate = 0.0 + } + + if loadedState.controlTimebase.isAudio { + self.audioRenderer?.rate = rate + } else { + if !CMTimebaseGetRate(loadedState.controlTimebase.timebase).isEqual(to: rate) { + CMTimebaseSetRate(loadedState.controlTimebase.timebase, 
rate) + } + } + + if let playerNode = self.playerNode, let videoTrackFrameBuffer = loadedState.mediaBuffers.videoBuffer, videoTrackFrameBuffer.hasFrames { + let queue = self.queue.queue + playerNode.beginRequestingFrames(queue: queue, takeFrame: { [weak videoTrackFrameBuffer] in + if let videoTrackFrameBuffer = videoTrackFrameBuffer { + return videoTrackFrameBuffer.takeFrame() + } else { + return .noFrames + } + }) + } + + if let audioRenderer = self.audioRenderer, let audioTrackFrameBuffer = loadedState.mediaBuffers.audioBuffer, audioTrackFrameBuffer.hasFrames { + let queue = self.queue.queue + audioRenderer.beginRequestingFrames(queue: queue, takeFrame: { [weak audioTrackFrameBuffer] in + if let audioTrackFrameBuffer = audioTrackFrameBuffer { + return audioTrackFrameBuffer.takeFrame() + } else { + return .noFrames + } + }) + } + + let playbackStatus: MediaPlayerPlaybackStatus + if buffering { + playbackStatus = .buffering + } else if rate.isEqual(to: 1.0) { + playbackStatus = .playing + } else { + playbackStatus = .paused + } + let status = MediaPlayerStatus(duration: duration, timestamp: timestamp, status: playbackStatus) + self.status.set(.single(status)) + } +} + +enum MediaPlayerPlaybackStatus { + case playing + case paused + case buffering +} + +struct MediaPlayerStatus { + let duration: Double + let timestamp: Double + let status: MediaPlayerPlaybackStatus +} + +final class MediaPlayer { + private let queue = Queue() + private var contextRef: Unmanaged? 
+ + var status: Signal { + return Signal { [weak self] subscriber in + let disposable = MetaDisposable() + + if let strongSelf = self { + strongSelf.queue.async { + if let context = strongSelf.contextRef?.takeUnretainedValue() { + disposable.set(context.status.get().start(next: { next in + subscriber.putNext(next) + }, error: { error in + subscriber.putError(error) + }, completed: { + subscriber.putCompletion() + })) + } + } + } + + return disposable + } + } + + init(account: Account, resource: MediaResource) { + self.queue.async { + let context = MediaPlayerContext(queue: self.queue, account: account, resource: resource) + self.contextRef = Unmanaged.passRetained(context) + } + } + + deinit { + let contextRef = self.contextRef + self.queue.async { + contextRef?.release() + } + } + + func play() { + self.queue.async { + if let context = self.contextRef?.takeUnretainedValue() { + context.play() + } + } + } + + func pause() { + self.queue.async { + if let context = self.contextRef?.takeUnretainedValue() { + context.pause() + } + } + } + + func seek(timestamp: Double) { + self.queue.async { + if let context = self.contextRef?.takeUnretainedValue() { + context.seek(timestamp: timestamp) + } + } + } + + func attachPlayerNode(_ node: MediaPlayerNode) { + self.queue.async { + if let context = self.contextRef?.takeUnretainedValue() { + node.queue = self.queue + context.playerNode = node + } + } + } +} diff --git a/TelegramUI/MediaPlayerAudioRenderer.swift b/TelegramUI/MediaPlayerAudioRenderer.swift new file mode 100644 index 0000000000..2bf371543d --- /dev/null +++ b/TelegramUI/MediaPlayerAudioRenderer.swift @@ -0,0 +1,545 @@ +import Foundation +import SwiftSignalKit +import CoreMedia +import AVFoundation +import TelegramCore + +private enum AudioPlayerRendererState { + case paused + case playing(didSetRate: Bool) +} + +private final class AudioPlayerRendererBufferContext { + var state: AudioPlayerRendererState = .paused + let timebase: CMTimebase + let buffer: 
RingByteBuffer + var bufferMaxChannelSampleIndex: Int64 = 0 + var lowWaterSize: Int + var notifyLowWater: () -> Void + var notifiedLowWater = false + var overflowData = Data() + var overflowDataMaxChannelSampleIndex: Int64 = 0 + var renderTimestampTick: Int64 = 0 + + init(timebase: CMTimebase, buffer: RingByteBuffer, lowWaterSize: Int, notifyLowWater: @escaping () -> Void) { + self.timebase = timebase + self.buffer = buffer + self.lowWaterSize = lowWaterSize + self.notifyLowWater = notifyLowWater + } +} + +private let audioPlayerRendererBufferContextMap = Atomic<[Int32: Atomic]>(value: [:]) +private let audioPlayerRendererQueue = Queue() + +private var _nextPlayerRendererBufferContextId: Int32 = 1 +private func registerPlayerRendererBufferContext(_ context: Atomic) -> Int32 { + var id: Int32 = 0 + + let _ = audioPlayerRendererBufferContextMap.modify { contextMap in + id = _nextPlayerRendererBufferContextId + _nextPlayerRendererBufferContextId += 1 + + var contextMap = contextMap + contextMap[id] = context + return contextMap + } + return id +} + +private func unregisterPlayerRendererBufferContext(_ id: Int32) { + let _ = audioPlayerRendererBufferContextMap.modify { contextMap in + var contextMap = contextMap + let _ = contextMap.removeValue(forKey: id) + return contextMap + } +} + +private func withPlayerRendererBuffer(_ id: Int32, _ f: (Atomic) -> Void) { + audioPlayerRendererBufferContextMap.with { contextMap in + if let context = contextMap[id] { + f(context) + } + } +} + +private let kOutputBus: UInt32 = 0 +private let kInputBus: UInt32 = 1 + +private func rendererInputProc(refCon: UnsafeMutableRawPointer, ioActionFlags: UnsafeMutablePointer, inTimeStamp: UnsafePointer, inBusNumber: UInt32, inNumberFrames: UInt32, ioData: UnsafeMutablePointer?) -> OSStatus { + guard let ioData = ioData else { + return noErr + } + + let bufferList = UnsafeMutableAudioBufferListPointer(ioData) + + var rendererFillOffset = (0, 0) + var notifyLowWater: (() -> Void)? 
+ + withPlayerRendererBuffer(Int32(unsafeBitCast(refCon, to: intptr_t.self)), { context in + context.with { context in + switch context.state { + case let .playing(didSetRate): + if context.buffer.availableBytes != 0 { + let sampleIndex = context.bufferMaxChannelSampleIndex - Int64(context.buffer.availableBytes / (2 * + 2)) + + if !didSetRate { + context.state = .playing(didSetRate: true) + let masterClock: CMClockOrTimebase + if #available(iOS 9.0, *) { + masterClock = CMTimebaseCopyMaster(context.timebase)! + } else { + masterClock = CMTimebaseGetMaster(context.timebase)! + } + CMTimebaseSetRateAndAnchorTime(context.timebase, 1.0, CMTimeMake(sampleIndex, 44100), CMSyncGetTime(masterClock)) + } else { + context.renderTimestampTick += 1 + if context.renderTimestampTick % 1000 == 0 { + let delta = (Double(sampleIndex) / 44100.0) - CMTimeGetSeconds(CMTimebaseGetTime(context.timebase)) + if delta > 0.01 { + CMTimebaseSetTime(context.timebase, CMTimeMake(sampleIndex, 44100)) + } + } + } + + let rendererBuffer = context.buffer + + while rendererFillOffset.0 < bufferList.count { + if let bufferData = bufferList[rendererFillOffset.0].mData { + let bufferDataSize = Int(bufferList[rendererFillOffset.0].mDataByteSize) + + let dataOffset = rendererFillOffset.1 + if dataOffset == bufferDataSize { + rendererFillOffset = (rendererFillOffset.0 + 1, 0) + continue + } + + let consumeCount = bufferDataSize - dataOffset + + let actualConsumedCount = rendererBuffer.dequeue(bufferData.advanced(by: dataOffset), count: consumeCount) + rendererFillOffset.1 += actualConsumedCount + + if actualConsumedCount == 0 { + break + } + } else { + break + } + } + } + + if !context.notifiedLowWater { + let availableBytes = context.buffer.availableBytes + if availableBytes <= context.lowWaterSize { + context.notifiedLowWater = true + notifyLowWater = context.notifyLowWater + } + } + case .paused: + break + } + } + }) + + for i in rendererFillOffset.0 ..< bufferList.count { + var dataOffset = 0 + if i 
== rendererFillOffset.0 { + dataOffset = rendererFillOffset.1 + } + if let data = bufferList[i].mData { + memset(data.advanced(by: dataOffset), 0, Int(bufferList[i].mDataByteSize) - dataOffset) + } + } + + if let notifyLowWater = notifyLowWater { + notifyLowWater() + } + + return noErr +} + +private struct RequestingFramesContext { + let queue: DispatchQueue + let takeFrame: () -> MediaTrackFrameResult +} + +private final class AudioPlayerRendererContext { + let audioStreamDescription: AudioStreamBasicDescription + let bufferSizeInSeconds: Int = 5 + let lowWaterSizeInSeconds: Int = 2 + + let controlTimebase: CMTimebase + + var audioUnit: AudioComponentInstance? + + var bufferContextId: Int32! + let bufferContext: Atomic + + var requestingFramesContext: RequestingFramesContext? + + init(controlTimebase: CMTimebase) { + assert(audioPlayerRendererQueue.isCurrent()) + + self.controlTimebase = controlTimebase + + self.audioStreamDescription = audioRendererNativeStreamDescription() + + let bufferSize = Int(self.audioStreamDescription.mSampleRate) * self.bufferSizeInSeconds * Int(self.audioStreamDescription.mBytesPerFrame) + let lowWaterSize = Int(self.audioStreamDescription.mSampleRate) * self.lowWaterSizeInSeconds * Int(self.audioStreamDescription.mBytesPerFrame) + + var notifyLowWater: () -> Void = { } + + self.bufferContext = Atomic(value: AudioPlayerRendererBufferContext(timebase: controlTimebase, buffer: RingByteBuffer(size: bufferSize), lowWaterSize: lowWaterSize, notifyLowWater: { + notifyLowWater() + })) + self.bufferContextId = registerPlayerRendererBufferContext(self.bufferContext) + + notifyLowWater = { [weak self] in + audioPlayerRendererQueue.async { + if let strongSelf = self { + strongSelf.checkBuffer() + } + } + } + } + + deinit { + assert(audioPlayerRendererQueue.isCurrent()) + + unregisterPlayerRendererBufferContext(self.bufferContextId) + + self.closeAudioUnit() + } + + fileprivate func setPlaying(_ playing: Bool) { + 
assert(audioPlayerRendererQueue.isCurrent()) + + self.bufferContext.with { context in + if playing { + context.state = .playing(didSetRate: false) + } else { + context.state = .paused + CMTimebaseSetRate(context.timebase, 0.0) + } + } + } + + fileprivate func flushBuffers(at timestamp: CMTime, completion: () -> Void) { + assert(audioPlayerRendererQueue.isCurrent()) + + self.bufferContext.with { context in + context.buffer.clear() + context.bufferMaxChannelSampleIndex = 0 + context.notifiedLowWater = false + context.overflowData = Data() + context.overflowDataMaxChannelSampleIndex = 0 + CMTimebaseSetTime(context.timebase, timestamp) + + switch context.state { + case .playing: + context.state = .playing(didSetRate: false) + case .paused: + break + } + + completion() + } + } + + fileprivate func startAudioUnit() { + if self.audioUnit == nil { + guard let _ = try? AVAudioSession.sharedInstance().setCategory(AVAudioSessionCategoryPlayback) else { + return + } + guard let _ = try? AVAudioSession.sharedInstance().setActive(true) else { + return + } + + var desc = AudioComponentDescription() + desc.componentType = kAudioUnitType_Output + desc.componentSubType = kAudioUnitSubType_RemoteIO + desc.componentFlags = 0 + desc.componentFlagsMask = 0 + desc.componentManufacturer = kAudioUnitManufacturer_Apple + guard let inputComponent = AudioComponentFindNext(nil, &desc) else { + return + } + + var maybeAudioUnit: AudioComponentInstance? 
+ + guard AudioComponentInstanceNew(inputComponent, &maybeAudioUnit) == noErr else { + return + } + + guard let audioUnit = maybeAudioUnit else { + return + } + + var one: UInt32 = 1 + guard AudioUnitSetProperty(audioUnit, kAudioOutputUnitProperty_EnableIO, kAudioUnitScope_Output, kOutputBus, &one, 4) == noErr else { + AudioComponentInstanceDispose(audioUnit) + return + } + + var audioStreamDescription = self.audioStreamDescription + guard AudioUnitSetProperty(audioUnit, kAudioUnitProperty_StreamFormat, kAudioUnitScope_Input, kOutputBus, &audioStreamDescription, UInt32(MemoryLayout.size)) == noErr else { + AudioComponentInstanceDispose(audioUnit) + return + } + + var callbackStruct = AURenderCallbackStruct() + callbackStruct.inputProc = rendererInputProc + callbackStruct.inputProcRefCon = unsafeBitCast(intptr_t(self.bufferContextId), to: UnsafeMutableRawPointer.self) + guard AudioUnitSetProperty(audioUnit, kAudioUnitProperty_SetRenderCallback, kAudioUnitScope_Global, kOutputBus, &callbackStruct, UInt32(MemoryLayout.size)) == noErr else { + AudioComponentInstanceDispose(audioUnit) + return + } + + guard AudioUnitInitialize(audioUnit) == noErr else { + AudioComponentInstanceDispose(audioUnit) + return + } + + guard AudioOutputUnitStart(audioUnit) == noErr else { + AudioComponentInstanceDispose(audioUnit) + return + } + + self.audioUnit = audioUnit + } + } + + fileprivate func closeAudioUnit() { + assert(audioPlayerRendererQueue.isCurrent()) + + if let audioUnit = self.audioUnit { + var status = noErr + + self.bufferContext.with { context in + context.buffer.clear() + } + + status = AudioOutputUnitStop(audioUnit) + if status != noErr { + trace("AudioPlayerRenderer", what: "AudioOutputUnitStop error \(status)") + } + + status = AudioComponentInstanceDispose(audioUnit); + if status != noErr { + trace("AudioPlayerRenderer", what: "AudioComponentInstanceDispose error \(status)") + } + self.audioUnit = nil + } + } + + func checkBuffer() { + 
assert(audioPlayerRendererQueue.isCurrent()) + + while true { + let bytesToRequest = self.bufferContext.with { context -> Int in + let availableBytes = context.buffer.availableBytes + if availableBytes <= context.lowWaterSize { + return context.buffer.size - availableBytes + } else { + return 0 + } + } + + if bytesToRequest == 0 { + self.bufferContext.with { context in + context.notifiedLowWater = false + } + break + } + + let overflowTakenLength = self.bufferContext.with { context -> Int in + let takeLength = min(context.overflowData.count, bytesToRequest) + if takeLength != 0 { + if takeLength == context.overflowData.count { + let data = context.overflowData + context.overflowData = Data() + self.enqueueSamples(data, sampleIndex: context.overflowDataMaxChannelSampleIndex - (data.count / (2 * 2))) + } else { + let data = context.overflowData.subdata(in: 0 ..< takeLength) + self.enqueueSamples(data, sampleIndex: context.overflowDataMaxChannelSampleIndex - (context.overflowData.count / (2 * 2))) + context.overflowData.replaceSubrange(0 ..< takeLength, with: Data()) + } + } + return takeLength + } + + if overflowTakenLength != 0 { + continue + } + + if let requestingFramesContext = self.requestingFramesContext { + requestingFramesContext.queue.async { + let takenFrame = requestingFramesContext.takeFrame() + audioPlayerRendererQueue.async { + switch takenFrame { + case let .frame(frame): + if let dataBuffer = CMSampleBufferGetDataBuffer(frame.sampleBuffer) { + let dataLength = CMBlockBufferGetDataLength(dataBuffer) + let takeLength = min(dataLength, bytesToRequest) + + let pts = CMSampleBufferGetPresentationTimeStamp(frame.sampleBuffer) + let bufferSampleIndex = CMTimeConvertScale(pts, 44100, .roundAwayFromZero).value + + let bytes = malloc(takeLength)! 
+ CMBlockBufferCopyDataBytes(dataBuffer, 0, takeLength, bytes) + self.enqueueSamples(Data(bytesNoCopy: bytes.assumingMemoryBound(to: UInt8.self), count: takeLength, deallocator: .free), sampleIndex: bufferSampleIndex) + + if takeLength < dataLength { + self.bufferContext.with { context in + let copyOffset = context.overflowData.count + context.overflowData.count += dataLength - takeLength + context.overflowData.withUnsafeMutableBytes { (bytes: UnsafeMutablePointer) -> Void in + CMBlockBufferCopyDataBytes(dataBuffer, takeLength, dataLength - takeLength, bytes.advanced(by: copyOffset)) + } + } + } + + self.checkBuffer() + } else { + assertionFailure() + } + case .skipFrame: + self.checkBuffer() + break + case .noFrames: + self.requestingFramesContext = nil + } + } + } + } else { + self.bufferContext.with { context in + context.notifiedLowWater = false + } + } + + break + } + } + + private func enqueueSamples(_ data: Data, sampleIndex: Int64) { + assert(audioPlayerRendererQueue.isCurrent()) + + self.bufferContext.with { context in + let bytesToCopy = min(context.buffer.size - context.buffer.availableBytes, data.count) + data.withUnsafeBytes { (bytes: UnsafePointer) -> Void in + let _ = context.buffer.enqueue(UnsafeRawPointer(bytes), count: bytesToCopy) + context.bufferMaxChannelSampleIndex = sampleIndex + (data.count / (2 * 2)) + } + } + } + + fileprivate func beginRequestingFrames(queue: DispatchQueue, takeFrame: @escaping () -> MediaTrackFrameResult) { + if let _ = self.requestingFramesContext { + return + } + + self.requestingFramesContext = RequestingFramesContext(queue: queue, takeFrame: takeFrame) + + self.checkBuffer() + } + + func endRequestingFrames() { + self.requestingFramesContext = nil + } +} + +private func audioRendererNativeStreamDescription() -> AudioStreamBasicDescription { + var canonicalBasicStreamDescription = AudioStreamBasicDescription() + canonicalBasicStreamDescription.mSampleRate = 44100.00 + canonicalBasicStreamDescription.mFormatID = 
kAudioFormatLinearPCM + canonicalBasicStreamDescription.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagsNativeEndian | kAudioFormatFlagIsPacked + canonicalBasicStreamDescription.mFramesPerPacket = 1 + canonicalBasicStreamDescription.mChannelsPerFrame = 2 + canonicalBasicStreamDescription.mBytesPerFrame = 2 * 2 + canonicalBasicStreamDescription.mBitsPerChannel = 8 * 2 + canonicalBasicStreamDescription.mBytesPerPacket = 2 * 2 + return canonicalBasicStreamDescription +} + +final class MediaPlayerAudioRenderer { + private var contextRef: Unmanaged? + + private let audioClock: CMClock + let audioTimebase: CMTimebase + + var rate: Double = 0.0 { + didSet { + let rate = self.rate + if !oldValue.isEqual(to: rate) { + print("setting audio rate to \(rate)") + assert(rate.isEqual(to: 1.0) || rate.isEqual(to: 0.0)) + + audioPlayerRendererQueue.async { + if let contextRef = self.contextRef { + let context = contextRef.takeUnretainedValue() + context.setPlaying(rate.isEqual(to: 1.0)) + } + } + } + } + } + + init() { + var audioClock: CMClock? + CMAudioClockCreate(nil, &audioClock) + self.audioClock = audioClock! + + var audioTimebase: CMTimebase? + CMTimebaseCreateWithMasterClock(nil, audioClock!, &audioTimebase) + self.audioTimebase = audioTimebase! + + audioPlayerRendererQueue.async { + let context = AudioPlayerRendererContext(controlTimebase: audioTimebase!) 
+ self.contextRef = Unmanaged.passRetained(context) + } + } + + deinit { + let contextRef = self.contextRef + audioPlayerRendererQueue.async { + contextRef?.release() + } + } + + func start() { + audioPlayerRendererQueue.async { + if let contextRef = self.contextRef { + let context = contextRef.takeUnretainedValue() + context.startAudioUnit() + } + } + } + + func stop() { + audioPlayerRendererQueue.async { + if let contextRef = self.contextRef { + let context = contextRef.takeUnretainedValue() + context.closeAudioUnit() + } + } + } + + func beginRequestingFrames(queue: DispatchQueue, takeFrame: @escaping () -> MediaTrackFrameResult) { + audioPlayerRendererQueue.async { + if let contextRef = self.contextRef { + let context = contextRef.takeUnretainedValue() + context.beginRequestingFrames(queue: queue, takeFrame: takeFrame) + } + } + } + + func flushBuffers(at timestamp: CMTime, completion: @escaping () -> Void) { + audioPlayerRendererQueue.async { + if let contextRef = self.contextRef { + let context = contextRef.takeUnretainedValue() + context.flushBuffers(at: timestamp, completion: completion) + } + } + } +} diff --git a/TelegramUI/MediaPlayerNode.swift b/TelegramUI/MediaPlayerNode.swift new file mode 100644 index 0000000000..df93336977 --- /dev/null +++ b/TelegramUI/MediaPlayerNode.swift @@ -0,0 +1,91 @@ +import Foundation +import UIKit +import AsyncDisplayKit +import SwiftSignalKit + +private final class MediaPlayerNodeDisplayView: UIView { + override class var layerClass: AnyClass { + return AVSampleBufferDisplayLayer.self + } +} + +final class MediaPlayerNode: ASDisplayNode { + private var displayView: MediaPlayerNodeDisplayView? + var snapshotNode: ASDisplayNode? { + didSet { + if let snapshotNode = oldValue { + snapshotNode.removeFromSupernode() + } + + if let snapshotNode = self.snapshotNode { + snapshotNode.frame = self.bounds + self.insertSubnode(snapshotNode, at: 0) + } + } + } + + var controlTimebase: CMTimebase? 
{ + get { + return (self.displayView?.layer as? AVSampleBufferDisplayLayer)?.controlTimebase + } set(value) { + (self.displayView?.layer as? AVSampleBufferDisplayLayer)?.controlTimebase = value + } + } + var queue: Queue? + private var isRequestingFrames = false + + override init() { + super.init() + + self.displayView = MediaPlayerNodeDisplayView() + self.view.addSubview(self.displayView!) + } + + override var frame: CGRect { + didSet { + if !oldValue.size.equalTo(self.frame.size) { + self.displayView?.frame = self.bounds + self.snapshotNode?.frame = self.bounds + } + } + } + + func reset() { + (self.displayView?.layer as? AVSampleBufferDisplayLayer)?.flush() + } + + func beginRequestingFrames(queue: DispatchQueue, takeFrame: @escaping () -> MediaTrackFrameResult) { + assert(self.queue != nil && self.queue!.isCurrent()) + + if isRequestingFrames { + return + } + isRequestingFrames = true + //print("begin requesting") + + (self.displayView?.layer as? AVSampleBufferDisplayLayer)?.requestMediaDataWhenReady(on: queue, using: { [weak self] in + if let strongSelf = self, let layer = strongSelf.displayView?.layer as? AVSampleBufferDisplayLayer { + loop: while layer.isReadyForMoreMediaData { + switch takeFrame() { + case let .frame(frame): + if frame.resetDecoder { + layer.flush() + } + layer.enqueue(frame.sampleBuffer) + case .skipFrame: + break + case .noFrames: + if let strongSelf = self, strongSelf.isRequestingFrames { + strongSelf.isRequestingFrames = false + if let layer = (strongSelf.displayView?.layer as? 
AVSampleBufferDisplayLayer) { + layer.stopRequestingMediaData() + } + //print("stop requesting") + } + break loop + } + } + } + }) + } +} diff --git a/TelegramUI/MediaTrackDecodableFrame.swift b/TelegramUI/MediaTrackDecodableFrame.swift new file mode 100644 index 0000000000..db36077da7 --- /dev/null +++ b/TelegramUI/MediaTrackDecodableFrame.swift @@ -0,0 +1,33 @@ +import Foundation +import CoreMedia +import TelegramUIPrivateModule + +enum MediaTrackFrameType { + case video + case audio +} + +final class MediaTrackDecodableFrame { + let type: MediaTrackFrameType + let packet: UnsafeMutablePointer + let pts: CMTime + let dts: CMTime + let duration: CMTime + + init(type: MediaTrackFrameType, packet: UnsafePointer, pts: CMTime, dts: CMTime, duration: CMTime) { + self.type = type + + self.pts = pts + self.dts = dts + self.duration = duration + + self.packet = UnsafeMutablePointer.allocate(capacity: 1) + av_init_packet(self.packet) + av_packet_ref(self.packet, packet) + } + + deinit { + av_packet_unref(self.packet) + self.packet.deallocate(capacity: 1) + } +} diff --git a/TelegramUI/MediaTrackFrame.swift b/TelegramUI/MediaTrackFrame.swift new file mode 100644 index 0000000000..e48bd4edb2 --- /dev/null +++ b/TelegramUI/MediaTrackFrame.swift @@ -0,0 +1,22 @@ +import Foundation +import CoreMedia + +final class MediaTrackFrame { + let type: MediaTrackFrameType + let sampleBuffer: CMSampleBuffer + let resetDecoder: Bool + + init(type: MediaTrackFrameType, sampleBuffer: CMSampleBuffer, resetDecoder: Bool) { + self.type = type + self.sampleBuffer = sampleBuffer + self.resetDecoder = resetDecoder + } + + var position: CMTime { + return CMSampleBufferGetPresentationTimeStamp(self.sampleBuffer) + } + + var duration: CMTime { + return CMSampleBufferGetDuration(self.sampleBuffer) + } +} diff --git a/TelegramUI/MediaTrackFrameBuffer.swift b/TelegramUI/MediaTrackFrameBuffer.swift new file mode 100644 index 0000000000..7071f54193 --- /dev/null +++ 
b/TelegramUI/MediaTrackFrameBuffer.swift @@ -0,0 +1,123 @@ +import Foundation +import SwiftSignalKit +import CoreMedia + +enum MediaTrackFrameBufferStatus { + case buffering + case full(until: Double) + case finished(at: Double) +} + +enum MediaTrackFrameResult { + case noFrames + case skipFrame + case frame(MediaTrackFrame) +} + +final class MediaTrackFrameBuffer { + private let stallDuration: Double = 1.0 + private let lowWaterDuration: Double = 2.0 + private let highWaterDuration: Double = 3.0 + + private let frameSource: MediaFrameSource + private let decoder: MediaTrackFrameDecoder + private let type: MediaTrackFrameType + let duration: CMTime + + var statusUpdated: () -> Void = { } + + private var frameSourceSinkIndex: Int? + + private var frames: [MediaTrackDecodableFrame] = [] + private var bufferedUntilTime: CMTime? + + init(frameSource: MediaFrameSource, decoder: MediaTrackFrameDecoder, type: MediaTrackFrameType, duration: CMTime) { + self.frameSource = frameSource + self.type = type + self.decoder = decoder + self.duration = duration + + self.frameSourceSinkIndex = self.frameSource.addEventSink { [weak self] event in + if let strongSelf = self { + switch event { + case let .frames(frames): + var filteredFrames: [MediaTrackDecodableFrame] = [] + for frame in frames { + if frame.type == type { + filteredFrames.append(frame) + } + } + if !filteredFrames.isEmpty { + strongSelf.addFrames(filteredFrames) + } + } + } + } + } + + deinit { + if let frameSourceSinkIndex = self.frameSourceSinkIndex { + self.frameSource.removeEventSink(frameSourceSinkIndex) + } + } + + private func addFrames(_ frames: [MediaTrackDecodableFrame]) { + self.frames.append(contentsOf: frames) + var maxUntilTime: CMTime? 
+ for frame in frames { + let frameEndTime = CMTimeAdd(frame.pts, frame.duration) + if self.bufferedUntilTime == nil || CMTimeCompare(self.bufferedUntilTime!, frameEndTime) < 0 { + self.bufferedUntilTime = frameEndTime + maxUntilTime = frameEndTime + } + } + + if let maxUntilTime = maxUntilTime { + print("added \(frames.count) frames until \(CMTimeGetSeconds(maxUntilTime)), \(self.frames.count) total") + } + + self.statusUpdated() + } + + func status(at timestamp: Double) -> MediaTrackFrameBufferStatus { + var bufferedDuration = 0.0 + if let bufferedUntilTime = bufferedUntilTime { + if CMTimeCompare(bufferedUntilTime, self.duration) >= 0 { + return .finished(at: CMTimeGetSeconds(bufferedUntilTime)) + } + + bufferedDuration = CMTimeGetSeconds(bufferedUntilTime) - timestamp + } + + if bufferedDuration < self.lowWaterDuration { + print("buffered duration: \(bufferedDuration), requesting until \(timestamp) + \(self.highWaterDuration - bufferedDuration)") + self.frameSource.generateFrames(until: timestamp + self.highWaterDuration) + + if bufferedDuration > self.stallDuration { + print("buffered1 duration: \(bufferedDuration), wait until \(timestamp) + \(self.highWaterDuration - bufferedDuration)") + return .full(until: timestamp + self.highWaterDuration) + } else { + return .buffering + } + } else { + print("buffered2 duration: \(bufferedDuration), wait until \(timestamp) + \(bufferedDuration - self.lowWaterDuration)") + return .full(until: timestamp + max(0.0, bufferedDuration - self.lowWaterDuration)) + } + } + + var hasFrames: Bool { + return !self.frames.isEmpty + } + + func takeFrame() -> MediaTrackFrameResult { + if !self.frames.isEmpty { + let frame = self.frames.removeFirst() + if let decodedFrame = self.decoder.decode(frame: frame) { + return .frame(decodedFrame) + } else { + return .skipFrame + } + } + return .noFrames + } +} diff --git a/TelegramUI/MediaTrackFrameDecoder.swift b/TelegramUI/MediaTrackFrameDecoder.swift new file mode 100644 index 
0000000000..25e45f2e19 --- /dev/null +++ b/TelegramUI/MediaTrackFrameDecoder.swift @@ -0,0 +1,5 @@ + +protocol MediaTrackFrameDecoder { + func decode(frame: MediaTrackDecodableFrame) -> MediaTrackFrame? + func reset() +} diff --git a/TelegramUI/PeerAvatar.swift b/TelegramUI/PeerAvatar.swift new file mode 100644 index 0000000000..1b6017dfc0 --- /dev/null +++ b/TelegramUI/PeerAvatar.swift @@ -0,0 +1,68 @@ +import Foundation +import SwiftSignalKit +import Postbox +import Display +import ImageIO +import TelegramCore + +private let roundCorners = { () -> UIImage in + let diameter: CGFloat = 60.0 + UIGraphicsBeginImageContextWithOptions(CGSize(width: diameter, height: diameter), false, 0.0) + let context = UIGraphicsGetCurrentContext()! + context.setBlendMode(.copy) + context.setFillColor(UIColor.black.cgColor) + context.fill(CGRect(origin: CGPoint(), size: CGSize(width: diameter, height: diameter))) + context.setFillColor(UIColor.clear.cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: CGSize(width: diameter, height: diameter))) + let image = UIGraphicsGetImageFromCurrentImageContext()!.stretchableImage(withLeftCapWidth: Int(diameter / 2.0), topCapHeight: Int(diameter / 2.0)) + UIGraphicsEndImageContext() + return image +}() + +func peerAvatarImage(account: Account, peer: Peer, displayDimensions: CGSize = CGSize(width: 60.0, height: 60.0)) -> Signal? { + var location: TelegramCloudMediaLocation? + + if let user = peer as? TelegramUser { + if let photo = user.photo.first { + location = photo.location.cloudLocation + } + } else if let group = peer as? 
TelegramGroup { + if let photo = group.photo.first { + location = photo.location.cloudLocation + } + } + + if let location = location { + return deferred { () -> Signal in + return cachedCloudFileLocation(location) + |> `catch` { _ in + return multipartDownloadFromCloudLocation(account: account, location: location, size: nil) + |> afterNext { data in + cacheCloudFileLocation(location, data: data) + } + } + |> runOn(account.graphicsThreadPool) |> deliverOn(account.graphicsThreadPool) + |> map { data -> UIImage in + assertNotOnMainThread() + + if let image = generateImage(displayDimensions, contextGenerator: { size, context -> Void in + if let imageSource = CGImageSourceCreateWithData(data as CFData, nil), let dataImage = CGImageSourceCreateImageAtIndex(imageSource, 0, nil) { + context.setBlendMode(.copy) + context.draw(dataImage, in: CGRect(origin: CGPoint(), size: displayDimensions)) + context.setBlendMode(.destinationOut) + context.draw(roundCorners.cgImage!, in: CGRect(origin: CGPoint(), size: displayDimensions)) + } + }) { + return image + } else { + UIGraphicsBeginImageContextWithOptions(displayDimensions, false, 0.0) + let image = UIGraphicsGetImageFromCurrentImageContext()! + UIGraphicsEndImageContext() + return image + } + } + } |> runOn(account.graphicsThreadPool) + } else { + return nil + } +} diff --git a/TelegramUI/PhotoResources.swift b/TelegramUI/PhotoResources.swift new file mode 100644 index 0000000000..d62ddb74ce --- /dev/null +++ b/TelegramUI/PhotoResources.swift @@ -0,0 +1,708 @@ +import Foundation +import Postbox +import SwiftSignalKit +import Display +import AVFoundation +import ImageIO +import TelegramUIPrivateModule +import TelegramCore + +func largestRepresentationForPhoto(_ photo: TelegramMediaImage) -> TelegramMediaImageRepresentation? 
{ + return photo.representationForDisplayAtSize(CGSize(width: 1280.0, height: 1280.0)) +} + +private func chatMessagePhotoDatas(account: Account, photo: TelegramMediaImage) -> Signal<(Data?, Data?, Int), NoError> { + if let smallestRepresentation = smallestImageRepresentation(photo.representations), let largestRepresentation = largestRepresentationForPhoto(photo), let smallestSize = smallestRepresentation.size, let largestSize = largestRepresentation.size { + let thumbnailResource = CloudFileMediaResource(location: smallestRepresentation.location, size: smallestSize) + let fullSizeResource = CloudFileMediaResource(location: largestRepresentation.location, size: largestSize) + + let maybeFullSize = account.postbox.mediaBox.resourceData(fullSizeResource) + + let signal = maybeFullSize |> take(1) |> mapToSignal { maybeData -> Signal<(Data?, Data?, Int), NoError> in + if maybeData.size >= fullSizeResource.size { + let loadedData: Data? = try? Data(contentsOf: URL(fileURLWithPath: maybeData.path), options: []) + + return .single((nil, loadedData, fullSizeResource.size)) + } else { + let fetchedThumbnail = account.postbox.mediaBox.fetchedResource(thumbnailResource) + + let thumbnail = Signal { subscriber in + let fetchedDisposable = fetchedThumbnail.start() + let thumbnailDisposable = account.postbox.mediaBox.resourceData(thumbnailResource).start(next: { next in + subscriber.putNext(next.size == 0 ? nil : try? Data(contentsOf: URL(fileURLWithPath: next.path), options: [])) + }, error: subscriber.putError, completed: subscriber.putCompletion) + + return ActionDisposable { + fetchedDisposable.dispose() + thumbnailDisposable.dispose() + } + } + + let fullSizeData = account.postbox.mediaBox.resourceData(fullSizeResource) |> map { next in + return next.size == 0 ? nil : try? 
Data(contentsOf: URL(fileURLWithPath: next.path), options: .mappedIfSafe) + } + + return thumbnail |> mapToSignal { thumbnailData in + return fullSizeData |> map { fullSizeData in + return (thumbnailData, fullSizeData, fullSizeResource.size) + } + } + } + } |> filter({ $0.0 != nil || $0.1 != nil }) + + return signal + } else { + return .never() + } +} + +private func chatMessageFileDatas(account: Account, file: TelegramMediaFile, progressive: Bool = false) -> Signal<(Data?, (Data, String)?, Int), NoError> { + if let smallestRepresentation = smallestImageRepresentation(file.previewRepresentations), let smallestSize = smallestRepresentation.size { + let thumbnailResource = CloudFileMediaResource(location: smallestRepresentation.location, size: smallestSize) + let fullSizeResource = CloudFileMediaResource(location: file.location, size: file.size) + + let maybeFullSize = account.postbox.mediaBox.resourceData(fullSizeResource) + + let signal = maybeFullSize |> take(1) |> mapToSignal { maybeData -> Signal<(Data?, (Data, String)?, Int), NoError> in + if maybeData.size >= fullSizeResource.size { + let loadedData: Data? = try? Data(contentsOf: URL(fileURLWithPath: maybeData.path), options: []) + + return .single((nil, loadedData == nil ? nil : (loadedData!, maybeData.path), fullSizeResource.size)) + } else { + let fetchedThumbnail = account.postbox.mediaBox.fetchedResource(thumbnailResource) + + let thumbnail = Signal { subscriber in + let fetchedDisposable = fetchedThumbnail.start() + let thumbnailDisposable = account.postbox.mediaBox.resourceData(thumbnailResource).start(next: { next in + subscriber.putNext(next.size == 0 ? nil : try? 
Data(contentsOf: URL(fileURLWithPath: next.path), options: [])) + }, error: subscriber.putError, completed: subscriber.putCompletion) + + return ActionDisposable { + fetchedDisposable.dispose() + thumbnailDisposable.dispose() + } + } + + + let fullSizeDataAndPath = account.postbox.mediaBox.resourceData(fullSizeResource, complete: !progressive) |> map { next -> (Data, String)? in + let data = next.size == 0 ? nil : try? Data(contentsOf: URL(fileURLWithPath: next.path), options: .mappedIfSafe) + return data == nil ? nil : (data!, next.path) + } + + return thumbnail |> mapToSignal { thumbnailData in + return fullSizeDataAndPath |> map { dataAndPath in + return (thumbnailData, dataAndPath, fullSizeResource.size) + } + } + } + } |> filter({ $0.0 != nil || $0.1 != nil }) + + return signal + } else { + return .never() + } +} + +private enum Corner: Hashable { + case TopLeft(Int), TopRight(Int), BottomLeft(Int), BottomRight(Int) + + var hashValue: Int { + switch self { + case let .TopLeft(radius): + return radius | (1 << 24) + case let .TopRight(radius): + return radius | (2 << 24) + case let .BottomLeft(radius): + return radius | (3 << 24) + case let .BottomRight(radius): + return radius | (2 << 24) + } + } + + var radius: Int { + switch self { + case let .TopLeft(radius): + return radius + case let .TopRight(radius): + return radius + case let .BottomLeft(radius): + return radius + case let .BottomRight(radius): + return radius + } + } +} + +private func ==(lhs: Corner, rhs: Corner) -> Bool { + switch lhs { + case let .TopLeft(lhsRadius): + switch rhs { + case let .TopLeft(rhsRadius) where rhsRadius == lhsRadius: + return true + default: + return false + } + case let .TopRight(lhsRadius): + switch rhs { + case let .TopRight(rhsRadius) where rhsRadius == lhsRadius: + return true + default: + return false + } + case let .BottomLeft(lhsRadius): + switch rhs { + case let .BottomLeft(rhsRadius) where rhsRadius == lhsRadius: + return true + default: + return false + } + case 
let .BottomRight(lhsRadius): + switch rhs { + case let .BottomRight(rhsRadius) where rhsRadius == lhsRadius: + return true + default: + return false + } + } +} + +private enum Tail: Hashable { + case BottomLeft(Int) + case BottomRight(Int) + + var hashValue: Int { + switch self { + case let .BottomLeft(radius): + return radius | (1 << 24) + case let .BottomRight(radius): + return radius | (2 << 24) + } + } + + var radius: Int { + switch self { + case let .BottomLeft(radius): + return radius + case let .BottomRight(radius): + return radius + } + } +} + +private func ==(lhs: Tail, rhs: Tail) -> Bool { + switch lhs { + case let .BottomLeft(lhsRadius): + switch rhs { + case let .BottomLeft(rhsRadius) where rhsRadius == lhsRadius: + return true + default: + return false + } + case let .BottomRight(lhsRadius): + switch rhs { + case let .BottomRight(rhsRadius) where rhsRadius == lhsRadius: + return true + default: + return false + } + } +} + +private var cachedCorners: [Corner: DrawingContext] = [:] +private let cachedCornersLock = SwiftSignalKit.Lock() +private var cachedTails: [Tail: DrawingContext] = [:] +private let cachedTailsLock = SwiftSignalKit.Lock() + +private func cornerContext(_ corner: Corner) -> DrawingContext { + var cached: DrawingContext? 
+ cachedCornersLock.locked { + cached = cachedCorners[corner] + } + + if let cached = cached { + return cached + } else { + let context = DrawingContext(size: CGSize(width: CGFloat(corner.radius), height: CGFloat(corner.radius)), clear: true) + + context.withContext { c in + c.setBlendMode(.copy) + c.setFillColor(UIColor.black.cgColor) + let rect: CGRect + switch corner { + case let .TopLeft(radius): + rect = CGRect(origin: CGPoint(), size: CGSize(width: CGFloat(radius << 1), height: CGFloat(radius << 1))) + case let .TopRight(radius): + rect = CGRect(origin: CGPoint(x: -CGFloat(radius), y: 0.0), size: CGSize(width: CGFloat(radius << 1), height: CGFloat(radius << 1))) + case let .BottomLeft(radius): + rect = CGRect(origin: CGPoint(x: 0.0, y: -CGFloat(radius)), size: CGSize(width: CGFloat(radius << 1), height: CGFloat(radius << 1))) + case let .BottomRight(radius): + rect = CGRect(origin: CGPoint(x: -CGFloat(radius), y: -CGFloat(radius)), size: CGSize(width: CGFloat(radius << 1), height: CGFloat(radius << 1))) + } + c.fillEllipse(in: rect) + } + + cachedCornersLock.locked { + cachedCorners[corner] = context + } + return context + } +} + +private func tailContext(_ tail: Tail) -> DrawingContext { + var cached: DrawingContext? 
+ cachedTailsLock.locked { + cached = cachedTails[tail] + } + + if let cached = cached { + return cached + } else { + let context = DrawingContext(size: CGSize(width: CGFloat(tail.radius) + 3.0, height: CGFloat(tail.radius)), clear: true) + + context.withContext { c in + c.setBlendMode(.copy) + c.setFillColor(UIColor.black.cgColor) + let rect: CGRect + switch tail { + case let .BottomLeft(radius): + rect = CGRect(origin: CGPoint(x: 3.0, y: -CGFloat(radius)), size: CGSize(width: CGFloat(radius << 1), height: CGFloat(radius << 1))) + + c.move(to: CGPoint(x: 3.0, y: 0.0)) + c.addLine(to: CGPoint(x: 3.0, y: 8.7)) + c.addLine(to: CGPoint(x: 2.0, y: 11.7)) + c.addLine(to: CGPoint(x: 1.5, y: 12.7)) + c.addLine(to: CGPoint(x: 0.8, y: 13.7)) + c.addLine(to: CGPoint(x: 0.2, y: 14.4)) + c.addLine(to: CGPoint(x: 3.5, y: 13.8)) + c.addLine(to: CGPoint(x: 5.0, y: 13.2)) + c.addLine(to: CGPoint(x: 3.0 + CGFloat(radius) - 9.5, y: 11.5)) + c.closePath() + c.fillPath() + case let .BottomRight(radius): + rect = CGRect(origin: CGPoint(x: -CGFloat(radius) + 3.0, y: -CGFloat(radius)), size: CGSize(width: CGFloat(radius << 1), height: CGFloat(radius << 1))) + + /*CGContextMoveToPoint(c, 3.0, 0.0) + CGContextAddLineToPoint(c, 3.0, 8.7) + CGContextAddLineToPoint(c, 2.0, 11.7) + CGContextAddLineToPoint(c, 1.5, 12.7) + CGContextAddLineToPoint(c, 0.8, 13.7) + CGContextAddLineToPoint(c, 0.2, 14.4) + CGContextAddLineToPoint(c, 3.5, 13.8) + CGContextAddLineToPoint(c, 5.0, 13.2) + CGContextAddLineToPoint(c, 3.0 + CGFloat(radius) - 9.5, 11.5) + CGContextClosePath(c) + CGContextFillPath(c)*/ + } + c.fillEllipse(in: rect) + } + + cachedCornersLock.locked { + cachedTails[tail] = context + } + return context + } +} + +private func addCorners(_ context: DrawingContext, arguments: TransformImageArguments) { + let corners = arguments.corners + let drawingRect = arguments.drawingRect + + if case let .Corner(radius) = corners.topLeft, radius > CGFloat(FLT_EPSILON) { + let corner = 
cornerContext(.TopLeft(Int(radius))) + context.blt(corner, at: CGPoint(x: drawingRect.minX, y: drawingRect.minY)) + } + + if case let .Corner(radius) = corners.topRight, radius > CGFloat(FLT_EPSILON) { + let corner = cornerContext(.TopRight(Int(radius))) + context.blt(corner, at: CGPoint(x: drawingRect.maxX - radius, y: drawingRect.minY)) + } + + switch corners.bottomLeft { + case let .Corner(radius): + if radius > CGFloat(FLT_EPSILON) { + let corner = cornerContext(.BottomLeft(Int(radius))) + context.blt(corner, at: CGPoint(x: drawingRect.minX, y: drawingRect.maxY - radius)) + } + case let .Tail(radius): + if radius > CGFloat(FLT_EPSILON) { + let tail = tailContext(.BottomLeft(Int(radius))) + let color = context.colorAt(CGPoint(x: drawingRect.minX, y: drawingRect.maxY - 1.0)) + context.withContext { c in + c.setFillColor(color.cgColor) + c.fill(CGRect(x: 0.0, y: drawingRect.maxY - 6.0, width: 3.0, height: 6.0)) + } + context.blt(tail, at: CGPoint(x: drawingRect.minX - 3.0, y: drawingRect.maxY - radius)) + } + + } + + switch corners.bottomRight { + case let .Corner(radius): + if radius > CGFloat(FLT_EPSILON) { + let corner = cornerContext(.BottomRight(Int(radius))) + context.blt(corner, at: CGPoint(x: drawingRect.maxX - radius, y: drawingRect.maxY - radius)) + } + case let .Tail(radius): + if radius > CGFloat(FLT_EPSILON) { + let tail = tailContext(.BottomRight(Int(radius))) + context.blt(tail, at: CGPoint(x: drawingRect.maxX - radius - 3.0, y: drawingRect.maxY - radius)) + } + } +} + +func chatMessagePhoto(account: Account, photo: TelegramMediaImage) -> Signal<(TransformImageArguments) -> DrawingContext, NoError> { + let signal = chatMessagePhotoDatas(account: account, photo: photo) + + return signal |> map { (thumbnailData, fullSizeData, fullTotalSize) in + return { arguments in + assertNotOnMainThread() + let context = DrawingContext(size: arguments.drawingSize, clear: true) + + let drawingRect = arguments.drawingRect + let fittedSize = 
arguments.imageSize.aspectFilled(arguments.boundingSize).fitted(arguments.imageSize) + let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) + + var fullSizeImage: CGImage? + if let fullSizeData = fullSizeData { + if fullSizeData.count >= fullTotalSize { + let options = NSMutableDictionary() + options.setValue(max(fittedSize.width * context.scale, fittedSize.height * context.scale) as NSNumber, forKey: kCGImageSourceThumbnailMaxPixelSize as String) + options.setValue(true as NSNumber, forKey: kCGImageSourceCreateThumbnailFromImageAlways as String) + if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options as CFDictionary) { + fullSizeImage = image + } + } else { + let imageSource = CGImageSourceCreateIncremental(nil) + CGImageSourceUpdateData(imageSource, fullSizeData as CFData, fullSizeData.count >= fullTotalSize) + + let options = NSMutableDictionary() + options[kCGImageSourceShouldCache as NSString] = false as NSNumber + if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + fullSizeImage = image + } + } + } + + var thumbnailImage: CGImage? + if let thumbnailData = thumbnailData, let imageSource = CGImageSourceCreateWithData(thumbnailData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, nil) { + thumbnailImage = image + } + + var blurredThumbnailImage: UIImage? 
+ if let thumbnailImage = thumbnailImage { + let thumbnailSize = CGSize(width: thumbnailImage.width, height: thumbnailImage.height) + let thumbnailContextSize = thumbnailSize.aspectFitted(CGSize(width: 150.0, height: 150.0)) + let thumbnailContext = DrawingContext(size: thumbnailContextSize, scale: 1.0) + thumbnailContext.withFlippedContext { c in + c.interpolationQuality = .none + c.draw(thumbnailImage, in: CGRect(origin: CGPoint(), size: thumbnailContextSize)) + } + telegramFastBlur(Int32(thumbnailContextSize.width), Int32(thumbnailContextSize.height), Int32(thumbnailContext.bytesPerRow), thumbnailContext.bytes) + + blurredThumbnailImage = thumbnailContext.generateImage() + } + + context.withFlippedContext { c in + c.setBlendMode(.copy) + if arguments.boundingSize != arguments.imageSize { + c.fill(arguments.drawingRect) + } + + c.setBlendMode(.copy) + if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { + c.interpolationQuality = .low + c.draw(cgImage, in: fittedRect) + } + + if let fullSizeImage = fullSizeImage { + c.setBlendMode(.normal) + c.interpolationQuality = .medium + c.draw(fullSizeImage, in: fittedRect) + } + } + + addCorners(context, arguments: arguments) + + return context + } + } +} + +func chatMessagePhotoStatus(account: Account, photo: TelegramMediaImage) -> Signal { + if let largestRepresentation = largestRepresentationForPhoto(photo), let largestSize = largestRepresentation.size { + let fullSizeResource = CloudFileMediaResource(location: largestRepresentation.location, size: largestSize) + return account.postbox.mediaBox.resourceStatus(fullSizeResource) + } else { + return .never() + } +} + +func chatMessagePhotoInteractiveFetched(account: Account, photo: TelegramMediaImage) -> Signal { + if let largestRepresentation = largestRepresentationForPhoto(photo), let largestSize = largestRepresentation.size { + let fullSizeResource = CloudFileMediaResource(location: largestRepresentation.location, size: 
largestSize) + return account.postbox.mediaBox.fetchedResource(fullSizeResource) + } else { + return .never() + } +} + +func chatMessagePhotoCancelInteractiveFetch(account: Account, photo: TelegramMediaImage) { + if let largestRepresentation = largestRepresentationForPhoto(photo), let largestSize = largestRepresentation.size { + let fullSizeResource = CloudFileMediaResource(location: largestRepresentation.location, size: largestSize) + return account.postbox.mediaBox.cancelInteractiveResourceFetch(fullSizeResource) + } +} + +func chatWebpageSnippetPhotoData(account: Account, photo: TelegramMediaImage) -> Signal { + if let closestRepresentation = photo.representationForDisplayAtSize(CGSize(width: 120.0, height: 120.0)) { + let resource = CloudFileMediaResource(location: closestRepresentation.location, size: closestRepresentation.size ?? 0) + let resourceData = account.postbox.mediaBox.resourceData(resource) |> map { next in + return next.size == 0 ? nil : try? Data(contentsOf: URL(fileURLWithPath: next.path), options: .mappedIfSafe) + } + + return Signal { subscriber in + let disposable = DisposableSet() + disposable.add(resourceData.start(next: { data in + subscriber.putNext(data) + }, error: { error in + subscriber.putError(error) + }, completed: { + subscriber.putCompletion() + })) + disposable.add(account.postbox.mediaBox.fetchedResource(resource).start()) + return disposable + } + } else { + return .never() + } +} + +func chatWebpageSnippetPhoto(account: Account, photo: TelegramMediaImage) -> Signal<(TransformImageArguments) -> DrawingContext, NoError> { + let signal = chatWebpageSnippetPhotoData(account: account, photo: photo) + + return signal |> map { fullSizeData in + return { arguments in + assertNotOnMainThread() + let context = DrawingContext(size: arguments.drawingSize, clear: true) + + let drawingRect = arguments.drawingRect + let fittedSize = arguments.imageSize.aspectFilled(arguments.boundingSize).fitted(arguments.imageSize) + let fittedRect = 
CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) + + var fullSizeImage: CGImage? + if let fullSizeData = fullSizeData { + let options = NSMutableDictionary() + options.setValue(max(fittedSize.width * context.scale, fittedSize.height * context.scale) as NSNumber, forKey: kCGImageSourceThumbnailMaxPixelSize as String) + options.setValue(true as NSNumber, forKey: kCGImageSourceCreateThumbnailFromImageAlways as String) + if let imageSource = CGImageSourceCreateWithData(fullSizeData as CFData, nil), let image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options as CFDictionary) { + fullSizeImage = image + } + } + + context.withFlippedContext { c in + c.setBlendMode(.copy) + if arguments.boundingSize.width > arguments.imageSize.width || arguments.boundingSize.height > arguments.imageSize.height { + c.fill(arguments.drawingRect) + } + + if let fullSizeImage = fullSizeImage { + c.interpolationQuality = .medium + c.draw(fullSizeImage, in: fittedRect) + } + } + + addCorners(context, arguments: arguments) + + return context + } + } +} + +func chatMessageVideo(account: Account, video: TelegramMediaFile) -> Signal<(TransformImageArguments) -> DrawingContext, NoError> { + let signal = chatMessageFileDatas(account: account, file: video) + + return signal |> map { (thumbnailData, fullSizeDataAndPath, fullTotalSize) in + return { arguments in + assertNotOnMainThread() + let context = DrawingContext(size: arguments.drawingSize, clear: true) + if arguments.drawingSize.width.isLessThanOrEqualTo(0.0) || arguments.drawingSize.height.isLessThanOrEqualTo(0.0) { + return context + } + + let drawingRect = arguments.drawingRect + let fittedSize = arguments.imageSize.aspectFilled(arguments.boundingSize).fitted(arguments.imageSize) + let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - 
fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) + + var fullSizeImage: CGImage? + if let fullSizeDataAndPath = fullSizeDataAndPath { + if fullSizeDataAndPath.0.count >= fullTotalSize { + if video.mimeType.hasPrefix("video/") { + let tempFilePath = NSTemporaryDirectory() + "\(arc4random()).mov" + + _ = try? FileManager.default.removeItem(atPath: tempFilePath) + _ = try? FileManager.default.linkItem(atPath: fullSizeDataAndPath.1, toPath: tempFilePath) + + let asset = AVAsset(url: URL(fileURLWithPath: tempFilePath)) + let imageGenerator = AVAssetImageGenerator(asset: asset) + imageGenerator.maximumSize = CGSize(width: 800.0, height: 800.0) + imageGenerator.appliesPreferredTrackTransform = true + if let image = try? imageGenerator.copyCGImage(at: CMTime(seconds: 0.0, preferredTimescale: asset.duration.timescale), actualTime: nil) { + fullSizeImage = image + } + } + /*let options: [NSString: NSObject] = [ + kCGImageSourceThumbnailMaxPixelSize: max(fittedSize.width * context.scale, fittedSize.height * context.scale), + kCGImageSourceCreateThumbnailFromImageAlways: true + ] + if let imageSource = CGImageSourceCreateWithData(fullSizeData, nil), image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options) { + fullSizeImage = image + }*/ + } else { + /*let imageSource = CGImageSourceCreateIncremental(nil) + CGImageSourceUpdateData(imageSource, fullSizeData as CFDataRef, fullSizeData.length >= fullTotalSize) + + var options: [NSString : NSObject!] = [:] + options[kCGImageSourceShouldCache as NSString] = false as NSNumber + if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionaryRef) { + fullSizeImage = image + }*/ + } + } + + var thumbnailImage: CGImage? 
+ if let thumbnailData = thumbnailData, let imageSource = CGImageSourceCreateWithData(thumbnailData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, nil) { + thumbnailImage = image + } + + var blurredThumbnailImage: UIImage? + if let thumbnailImage = thumbnailImage { + let thumbnailSize = CGSize(width: thumbnailImage.width, height: thumbnailImage.height) + let thumbnailContextSize = thumbnailSize.aspectFitted(CGSize(width: 150.0, height: 150.0)) + let thumbnailContext = DrawingContext(size: thumbnailContextSize, scale: 1.0) + thumbnailContext.withFlippedContext { c in + c.interpolationQuality = .none + c.draw(thumbnailImage, in: CGRect(origin: CGPoint(), size: thumbnailContextSize)) + } + telegramFastBlur(Int32(thumbnailContextSize.width), Int32(thumbnailContextSize.height), Int32(thumbnailContext.bytesPerRow), thumbnailContext.bytes) + + blurredThumbnailImage = thumbnailContext.generateImage() + } + + context.withFlippedContext { c in + c.setBlendMode(.copy) + if arguments.boundingSize != arguments.imageSize { + c.fill(arguments.drawingRect) + } + + c.setBlendMode(.copy) + if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { + c.interpolationQuality = .low + c.draw(cgImage, in: fittedRect) + } + + if let fullSizeImage = fullSizeImage { + c.setBlendMode(.normal) + c.interpolationQuality = .medium + c.draw(fullSizeImage, in: fittedRect) + } + } + + addCorners(context, arguments: arguments) + + return context + } + } +} + +func chatMessageImageFile(account: Account, file: TelegramMediaFile, progressive: Bool = false) -> Signal<(TransformImageArguments) -> DrawingContext, NoError> { + let signal = chatMessageFileDatas(account: account, file: file, progressive: progressive) + + return signal |> map { (thumbnailData, fullSizeDataAndPath, fullTotalSize) in + return { arguments in + assertNotOnMainThread() + let context = DrawingContext(size: arguments.drawingSize, clear: true) + + let drawingRect 
= arguments.drawingRect + let fittedSize = arguments.imageSize.aspectFilled(arguments.boundingSize).fitted(arguments.imageSize) + let fittedRect = CGRect(origin: CGPoint(x: drawingRect.origin.x + (drawingRect.size.width - fittedSize.width) / 2.0, y: drawingRect.origin.y + (drawingRect.size.height - fittedSize.height) / 2.0), size: fittedSize) + + var fullSizeImage: CGImage? + if let fullSizeDataAndPath = fullSizeDataAndPath { + if fullSizeDataAndPath.0.count >= fullTotalSize { + let options = NSMutableDictionary() + options.setValue(max(fittedSize.width * context.scale, fittedSize.height * context.scale) as NSNumber, forKey: kCGImageSourceThumbnailMaxPixelSize as String) + options.setValue(true as NSNumber, forKey: kCGImageSourceCreateThumbnailFromImageAlways as String) + if let imageSource = CGImageSourceCreateWithData(fullSizeDataAndPath.0 as CFData, nil), let image = CGImageSourceCreateThumbnailAtIndex(imageSource, 0, options) { + fullSizeImage = image + } + } else if progressive { + let imageSource = CGImageSourceCreateIncremental(nil) + CGImageSourceUpdateData(imageSource, fullSizeDataAndPath.0 as CFData, fullSizeDataAndPath.0.count >= fullTotalSize) + + let options = NSMutableDictionary() + options[kCGImageSourceShouldCache as NSString] = false as NSNumber + if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + fullSizeImage = image + } + } + } + + var thumbnailImage: CGImage? + if let thumbnailData = thumbnailData, let imageSource = CGImageSourceCreateWithData(thumbnailData as CFData, nil), let image = CGImageSourceCreateImageAtIndex(imageSource, 0, nil) { + thumbnailImage = image + } + + var blurredThumbnailImage: UIImage? 
+ if let thumbnailImage = thumbnailImage { + let thumbnailSize = CGSize(width: thumbnailImage.width, height: thumbnailImage.height) + let thumbnailContextSize = thumbnailSize.aspectFitted(CGSize(width: 150.0, height: 150.0)) + let thumbnailContext = DrawingContext(size: thumbnailContextSize, scale: 1.0) + thumbnailContext.withFlippedContext { c in + c.interpolationQuality = .none + c.draw(thumbnailImage, in: CGRect(origin: CGPoint(), size: thumbnailContextSize)) + } + telegramFastBlur(Int32(thumbnailContextSize.width), Int32(thumbnailContextSize.height), Int32(thumbnailContext.bytesPerRow), thumbnailContext.bytes) + + blurredThumbnailImage = thumbnailContext.generateImage() + } + + context.withFlippedContext { c in + c.setBlendMode(.copy) + if arguments.boundingSize != arguments.imageSize { + c.fill(arguments.drawingRect) + } + + c.setBlendMode(.copy) + if let blurredThumbnailImage = blurredThumbnailImage, let cgImage = blurredThumbnailImage.cgImage { + c.interpolationQuality = .low + c.draw(cgImage, in: fittedRect) + } + + if let fullSizeImage = fullSizeImage { + c.setBlendMode(.normal) + c.interpolationQuality = .medium + c.draw(fullSizeImage, in: fittedRect) + } + } + + addCorners(context, arguments: arguments) + + return context + } + } +} + +func chatMessageFileStatus(account: Account, file: TelegramMediaFile) -> Signal { + let fullSizeResource = CloudFileMediaResource(location: file.location, size: file.size) + return account.postbox.mediaBox.resourceStatus(fullSizeResource) +} + +func chatMessageFileInteractiveFetched(account: Account, file: TelegramMediaFile) -> Signal { + let fullSizeResource = CloudFileMediaResource(location: file.location, size: file.size) + return account.postbox.mediaBox.fetchedResource(fullSizeResource) +} + +func chatMessageFileCancelInteractiveFetch(account: Account, file: TelegramMediaFile) { + let fullSizeResource = CloudFileMediaResource(location: file.location, size: file.size) + return 
account.postbox.mediaBox.cancelInteractiveResourceFetch(fullSizeResource) +} diff --git a/TelegramUI/ProgressiveImage.swift b/TelegramUI/ProgressiveImage.swift new file mode 100644 index 0000000000..328f420a84 --- /dev/null +++ b/TelegramUI/ProgressiveImage.swift @@ -0,0 +1,52 @@ +import Foundation +import UIKit +import SwiftSignalKit +import Display +import ImageIO + +public final class ProgressiveImage { + let backgroundImage: UIImage? + let image: UIImage? + + public init(backgroundImage: UIImage?, image: UIImage?) { + self.backgroundImage = backgroundImage + self.image = image + } +} + +public func progressiveImage(dataSignal: Signal, size: Int, mapping: @escaping (CGImage) -> UIImage) -> Signal { + return Signal { subscriber in + let imageSource = CGImageSourceCreateIncremental(nil) + var lastSize = 0 + + return dataSignal.start(next: { data in + if let data = data { + if data.count >= lastSize + 24 * 1024 || (lastSize != data.count && data.count >= size) { + lastSize = data.count + + let copyData = data.withUnsafeBytes { bytes -> CFData in + return CFDataCreate(nil, bytes, data.count) + } + CGImageSourceUpdateData(imageSource, copyData, data.count >= size) + + let options = NSMutableDictionary() + options[kCGImageSourceShouldCache as NSString] = false as NSNumber + if let image = CGImageSourceCreateImageAtIndex(imageSource, 0, options as CFDictionary) { + subscriber.putNext(mapping(image)) + } else { + subscriber.putNext(nil) + } + if data.count >= size { + subscriber.putCompletion() + } + } + } else { + subscriber.putNext(nil) + } + }, error: { error in + subscriber.putError(error) + }, completed: { + subscriber.putCompletion() + }) + } +} diff --git a/TelegramUI/RadialProgressNode.swift b/TelegramUI/RadialProgressNode.swift new file mode 100644 index 0000000000..5fe5fa8c70 --- /dev/null +++ b/TelegramUI/RadialProgressNode.swift @@ -0,0 +1,254 @@ +import Foundation +import AsyncDisplayKit +import SwiftSignalKit +import Display + +private class 
RadialProgressParameters: NSObject { + let theme: RadialProgressTheme + let diameter: CGFloat + let state: RadialProgressState + + init(theme: RadialProgressTheme, diameter: CGFloat, state: RadialProgressState) { + self.theme = theme + self.diameter = diameter + self.state = state + + super.init() + } +} + +private class RadialProgressOverlayParameters: NSObject { + let theme: RadialProgressTheme + let diameter: CGFloat + let state: RadialProgressState + + init(theme: RadialProgressTheme, diameter: CGFloat, state: RadialProgressState) { + self.theme = theme + self.diameter = diameter + self.state = state + + super.init() + } +} + +private class RadialProgressOverlayNode: ASDisplayNode { + let theme: RadialProgressTheme + + var state: RadialProgressState = .None { + didSet { + self.setNeedsDisplay() + } + } + + init(theme: RadialProgressTheme) { + self.theme = theme + + super.init() + + self.isOpaque = false + } + + override func drawParameters(forAsyncLayer layer: _ASDisplayLayer) -> NSObjectProtocol? { + return RadialProgressOverlayParameters(theme: self.theme, diameter: self.frame.size.width, state: self.state) + } + + @objc override class func draw(_ bounds: CGRect, withParameters parameters: NSObjectProtocol!, isCancelled: asdisplaynode_iscancelled_block_t, isRasterizing: Bool) { + let context = UIGraphicsGetCurrentContext()! + + if !isRasterizing { + context.setBlendMode(.copy) + context.setFillColor(UIColor.clear.cgColor) + context.fill(bounds) + } + + if let parameters = parameters as? 
RadialProgressOverlayParameters { + context.setStrokeColor(parameters.theme.foregroundColor.cgColor) + //CGContextSetLineWidth(context, 2.5) + //CGContextSetLineCap(context, .Round) + + switch parameters.state { + case .None, .Remote, .Play: + break + case let .Fetching(progress): + let startAngle = -CGFloat(M_PI_2) + let endAngle = 2.0 * (CGFloat(M_PI)) * CGFloat(progress) - CGFloat(M_PI_2) + + let pathDiameter = parameters.diameter - 2.25 - 2.5 * 2.0 + + let path = UIBezierPath(arcCenter: CGPoint(x: parameters.diameter / 2.0, y: parameters.diameter / 2.0), radius: pathDiameter / 2.0, startAngle: startAngle, endAngle:endAngle, clockwise:true) + path.lineWidth = 2.25; + path.lineCapStyle = .round; + path.stroke() + } + } + } + + override func willEnterHierarchy() { + super.willEnterHierarchy() + + let basicAnimation = CABasicAnimation(keyPath: "transform.rotation.z") + basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionEaseInEaseOut) + basicAnimation.duration = 2.0 + basicAnimation.fromValue = NSNumber(value: Float(0.0)) + basicAnimation.toValue = NSNumber(value: Float(M_PI * 2.0)) + basicAnimation.repeatCount = Float.infinity + basicAnimation.timingFunction = CAMediaTimingFunction(name: kCAMediaTimingFunctionLinear) + + self.layer.add(basicAnimation, forKey: "progressRotation") + } + + override func didExitHierarchy() { + super.didExitHierarchy() + + self.layer.removeAnimation(forKey: "progressRotation") + } +} + +public enum RadialProgressState { + case None + case Remote + case Fetching(progress: Float) + case Play +} + +public struct RadialProgressTheme { + public let backgroundColor: UIColor + public let foregroundColor: UIColor + public let icon: UIImage? 
+} + +class RadialProgressNode: ASControlNode { + private let theme: RadialProgressTheme + private let overlay: RadialProgressOverlayNode + + var state: RadialProgressState = .None { + didSet { + self.overlay.state = self.state + if case .Fetching = self.state { + if self.overlay.supernode == nil { + self.addSubnode(self.overlay) + } + } else { + if self.overlay.supernode != nil { + self.overlay.removeFromSupernode() + } + } + switch oldValue { + case .Fetching: + switch self.state { + case .Fetching: + break + default: + self.setNeedsDisplay() + } + case .Remote: + switch self.state { + case .Remote: + break + default: + self.setNeedsDisplay() + } + case .None: + switch self.state { + case .None: + break + default: + self.setNeedsDisplay() + } + case .Play: + switch self.state { + case .Play: + break + default: + self.setNeedsDisplay() + } + } + } + } + + convenience override init() { + self.init(theme: RadialProgressTheme(backgroundColor: UIColor(white: 0.0, alpha: 0.6), foregroundColor: UIColor.white, icon: nil)) + } + + init(theme: RadialProgressTheme) { + self.theme = theme + self.overlay = RadialProgressOverlayNode(theme: theme) + + super.init() + + self.isOpaque = false + } + + override var frame: CGRect { + get { + return super.frame + } set(value) { + let redraw = value.size != self.frame.size + super.frame = value + + if redraw { + self.overlay.frame = CGRect(origin: CGPoint(), size: value.size) + self.setNeedsDisplay() + self.overlay.setNeedsDisplay() + } + } + } + + override func drawParameters(forAsyncLayer layer: _ASDisplayLayer) -> NSObjectProtocol? { + return RadialProgressParameters(theme: self.theme, diameter: self.frame.size.width, state: self.state) + } + + @objc override class func draw(_ bounds: CGRect, withParameters parameters: NSObjectProtocol!, isCancelled: asdisplaynode_iscancelled_block_t, isRasterizing: Bool) { + let context = UIGraphicsGetCurrentContext()! 
+ + if !isRasterizing { + context.setBlendMode(.copy) + context.setFillColor(UIColor.clear.cgColor) + context.fill(bounds) + } + + if let parameters = parameters as? RadialProgressParameters { + context.setFillColor(parameters.theme.backgroundColor.cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: CGSize(width: parameters.diameter, height: parameters.diameter))) + + switch parameters.state { + case .None: + break + case .Fetching: + context.setStrokeColor(parameters.theme.foregroundColor.cgColor) + context.setLineWidth(2.0) + context.setLineCap(.round) + + let crossSize: CGFloat = 14.0 + context.move(to: CGPoint(x: parameters.diameter / 2.0 - crossSize / 2.0, y: parameters.diameter / 2.0 - crossSize / 2.0)) + context.addLine(to: CGPoint(x: parameters.diameter / 2.0 + crossSize / 2.0, y: parameters.diameter / 2.0 + crossSize / 2.0)) + context.strokePath() + context.move(to: CGPoint(x: parameters.diameter / 2.0 + crossSize / 2.0, y: parameters.diameter / 2.0 - crossSize / 2.0)) + context.addLine(to: CGPoint(x: parameters.diameter / 2.0 - crossSize / 2.0, y: parameters.diameter / 2.0 + crossSize / 2.0)) + context.strokePath() + case .Remote: + context.setStrokeColor(parameters.theme.foregroundColor.cgColor) + context.setLineWidth(2.0) + context.setLineCap(.round) + context.setLineJoin(.round) + + let arrowHeadSize: CGFloat = 15.0 + let arrowLength: CGFloat = 18.0 + let arrowHeadOffset: CGFloat = 1.0 + + context.move(to: CGPoint(x: parameters.diameter / 2.0, y: parameters.diameter / 2.0 - arrowLength / 2.0 + arrowHeadOffset)) + context.addLine(to: CGPoint(x: parameters.diameter / 2.0, y: parameters.diameter / 2.0 + arrowLength / 2.0 - 1.0 + arrowHeadOffset)) + context.strokePath() + + context.move(to: CGPoint(x: parameters.diameter / 2.0 - arrowHeadSize / 2.0, y: parameters.diameter / 2.0 + arrowLength / 2.0 - arrowHeadSize / 2.0 + arrowHeadOffset)) + context.addLine(to: CGPoint(x: parameters.diameter / 2.0, y: parameters.diameter / 2.0 + arrowLength / 
2.0 + arrowHeadOffset)) + context.addLine(to: CGPoint(x: parameters.diameter / 2.0 + arrowHeadSize / 2.0, y: parameters.diameter / 2.0 + arrowLength / 2.0 - arrowHeadSize / 2.0 + arrowHeadOffset)) + context.strokePath() + case .Play: + if let icon = parameters.theme.icon { + icon.draw(at: CGPoint(x: floor((parameters.diameter - icon.size.width) / 2.0), y: floor((parameters.diameter - icon.size.height) / 2.0))) + } + } + } + } +} diff --git a/TelegramUI/ResizeableTextInputView.swift b/TelegramUI/ResizeableTextInputView.swift new file mode 100644 index 0000000000..541643afb7 --- /dev/null +++ b/TelegramUI/ResizeableTextInputView.swift @@ -0,0 +1,112 @@ +import Foundation +import UIKit +import Display + +class ResizeableTextInputViewImpl: UITextView { + override func setContentOffset(_ contentOffset: CGPoint, animated: Bool) { + super.setContentOffset(contentOffset, animated: false) + } +} + +class ResizeableTextInputView: UIView, UITextViewDelegate { + let textView: ResizeableTextInputViewImpl + let shadowTextView: ResizeableTextInputViewImpl + let placeholderLabel: UILabel + var updateHeight: () -> Void = { } + var maxHeightForLines: CGFloat + var heightForSingleLine: CGFloat + let insets = UIEdgeInsets(top: 2.0, left: 0.0, bottom: 4.0, right: 0.0) + + var placeholder: String { + get { + return self.placeholderLabel.text ?? 
"" + } set(value) { + self.placeholderLabel.text = value + self.placeholderLabel.sizeToFit() + let placeholderSize = self.placeholderLabel.bounds.size + self.placeholderLabel.frame = CGRect(x: 2.0, y: self.insets.top, width: placeholderSize.width, height: placeholderSize.height) + } + } + + init() { + self.textView = ResizeableTextInputViewImpl() + self.textView.layoutManager.allowsNonContiguousLayout = true + self.textView.textContainerInset = UIEdgeInsets(top: 0.0, left: self.insets.left, bottom: 0.0, right: self.insets.right) + self.textView.backgroundColor = UIColor.clear + self.textView.textColor = UIColor.black + self.textView.isOpaque = false + self.textView.font = Font.regular(16.0) + + self.shadowTextView = ResizeableTextInputViewImpl() + self.shadowTextView.font = self.textView.font + self.shadowTextView.textContainerInset = self.textView.textContainerInset + self.shadowTextView.layoutManager.allowsNonContiguousLayout = true + self.shadowTextView.frame = CGRect(x: 0.0, y: 0.0, width: 100.0, height: CGFloat.greatestFiniteMagnitude) + + self.shadowTextView.text = "A" + self.shadowTextView.layoutManager.ensureLayout(for: shadowTextView.textContainer) + let singleLineHeight = ceil(shadowTextView.layoutManager.usedRect(for: shadowTextView.textContainer).size.height) + self.heightForSingleLine = singleLineHeight + 2.0 + self.insets.top + self.insets.bottom + + self.shadowTextView.text = "\n\n\n" + self.shadowTextView.layoutManager.ensureLayout(for: shadowTextView.textContainer) + let maxHeight = ceil(shadowTextView.layoutManager.usedRect(for: shadowTextView.textContainer).size.height) + self.maxHeightForLines = maxHeight + 2.0 + self.insets.top + self.insets.bottom + + self.placeholderLabel = UILabel() + + super.init(frame: CGRect()) + + self.clipsToBounds = true + + self.textView.delegate = self + self.addSubview(textView) + + self.placeholderLabel.font = self.textView.font + self.placeholderLabel.textColor = UIColor(0xbebec0) + 
self.addSubview(self.placeholderLabel) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + private func maxHeight() -> CGFloat { + return self.maxHeightForLines + } + + func calculateSizeThatFits(constrainedSize: CGSize) -> CGSize { + self.shadowTextView.frame = CGRect(x: 0.0, y: 0.0, width: constrainedSize.width + 4.0, height: CGFloat.greatestFiniteMagnitude) + self.shadowTextView.text = "\n" + //shadowTextView.layoutManager.ensureLayoutForTextContainer(shadowTextView.textContainer) + self.shadowTextView.text = textView.text + + shadowTextView.layoutManager.glyphRange(for: shadowTextView.textContainer) + let height = ceil(shadowTextView.layoutManager.usedRect(for: shadowTextView.textContainer).size.height) + + return CGSize(width: constrainedSize.width, height: min(height + 2.0 + self.insets.top + self.insets.bottom, self.maxHeight())) + } + + func textViewDidChange(_ textView: UITextView) { + self.placeholderLabel.isHidden = textView.text.startIndex != textView.text.endIndex + self.updateHeight() + } + + override var frame: CGRect { + get { + return super.frame + } set(value) { + super.frame = value + + let heightFix: CGFloat = 25.0 + self.textView.frame = CGRect(x: -4.0, y: -0.5, width: value.size.width + 4.0, height: value.size.height + heightFix - self.insets.bottom) + let distance = -(self.maxHeight() - self.textView.frame.size.height) + self.clipsToBounds = distance > 0.0 + self.textView.contentInset = UIEdgeInsets(top: 2.0 + self.insets.top, left: 0.0, bottom: max(0.0, distance) + self.insets.bottom, right: 0.0) + self.textView.scrollIndicatorInsets = UIEdgeInsets(top: 2.0 + self.insets.top, left: 0.0, bottom: max(0.0, distance) + self.insets.bottom, right: -2.0) + + let placeholderSize = self.placeholderLabel.bounds.size + self.placeholderLabel.frame = CGRect(x: 1.0, y: self.insets.top + 2.0, width: placeholderSize.width, height: placeholderSize.height) + } + } +} diff --git 
a/TelegramUI/RingBuffer.h b/TelegramUI/RingBuffer.h new file mode 100644 index 0000000000..46f07f9dfb --- /dev/null +++ b/TelegramUI/RingBuffer.h @@ -0,0 +1,140 @@ +#import + +#include +#include + +#ifdef __cplusplus +extern "C" { +#endif + + typedef struct { + void *buffer; + int32_t length; + int32_t tail; + int32_t head; + int32_t fillCount; + } TPCircularBuffer; + + /*! + * Initialise buffer + * + * Note that the length is advisory only: Because of the way the + * memory mirroring technique works, the true buffer length will + * be multiples of the device page size (e.g. 4096 bytes) + * + * If you intend to use the AudioBufferList utilities, you should + * always allocate a bit more space than you need for pure audio + * data, so there's room for the metadata. How much extra is required + * depends on how many AudioBufferList structures are used, which is + * a function of how many audio frames each buffer holds. A good rule + * of thumb is to add 15%, or at least another 2048 bytes or so. + * + * @param buffer Circular buffer + * @param length Length of buffer + */ + bool TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length); + bool _TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length, size_t structSize); + + /*! + * Cleanup buffer + * + * Releases buffer resources. + */ + void TPCircularBufferCleanup(TPCircularBuffer *buffer); + + /*! + * Clear buffer + * + * Resets buffer to original, empty state. + * + * This is safe for use by consumer while producer is accessing + * buffer. + */ + void TPCircularBufferClear(TPCircularBuffer *buffer); + + // Reading (consuming) + + /*! + * Access end of buffer + * + * This gives you a pointer to the end of the buffer, ready + * for reading, and the number of available bytes to read. 
+ * + * @param buffer Circular buffer + * @param availableBytes On output, the number of bytes ready for reading + * @return Pointer to the first bytes ready for reading, or NULL if buffer is empty + */ + static __inline__ __attribute__((always_inline)) void* TPCircularBufferTail(TPCircularBuffer *buffer, int32_t* availableBytes) { + *availableBytes = buffer->fillCount; + if ( *availableBytes == 0 ) return NULL; + return (void*)((char*)buffer->buffer + buffer->tail); + } + + /*! + * Consume bytes in buffer + * + * This frees up the just-read bytes, ready for writing again. + * + * @param buffer Circular buffer + * @param amount Number of bytes to consume + */ + static __inline__ __attribute__((always_inline)) void TPCircularBufferConsume(TPCircularBuffer *buffer, int32_t amount) { + buffer->tail = (buffer->tail + amount) % buffer->length; + buffer->fillCount -= amount; + assert(buffer->fillCount >= 0); + } + + /*! + * Access front of buffer + * + * This gives you a pointer to the front of the buffer, ready + * for writing, and the number of available bytes to write. + * + * @param buffer Circular buffer + * @param availableBytes On output, the number of bytes ready for writing + * @return Pointer to the first bytes ready for writing, or NULL if buffer is full + */ + static __inline__ __attribute__((always_inline)) void* TPCircularBufferHead(TPCircularBuffer *buffer, int32_t* availableBytes) { + *availableBytes = (buffer->length - buffer->fillCount); + if ( *availableBytes == 0 ) return NULL; + return (void*)((char*)buffer->buffer + buffer->head); + } + + // Writing (producing) + + /*! + * Produce bytes in buffer + * + * This marks the given section of the buffer ready for reading. 
+ * + * @param buffer Circular buffer + * @param amount Number of bytes to produce + */ + static __inline__ __attribute__((always_inline)) void TPCircularBufferProduce(TPCircularBuffer *buffer, int32_t amount) { + buffer->head = (buffer->head + amount) % buffer->length; + buffer->fillCount += amount; + assert(buffer->fillCount <= buffer->length); + } + + /*! + * Helper routine to copy bytes to buffer + * + * This copies the given bytes to the buffer, and marks them ready for reading. + * + * @param buffer Circular buffer + * @param src Source buffer + * @param len Number of bytes in source buffer + * @return true if bytes copied, false if there was insufficient space + */ + static __inline__ __attribute__((always_inline)) bool TPCircularBufferProduceBytes(TPCircularBuffer *buffer, const void* src, int32_t len) { + int32_t space; + void *ptr = TPCircularBufferHead(buffer, &space); + if ( space < len ) return false; + memcpy(ptr, src, len); + TPCircularBufferProduce(buffer, len); + return true; + } +#ifdef __cplusplus +} +#endif + diff --git a/TelegramUI/RingBuffer.m b/TelegramUI/RingBuffer.m new file mode 100644 index 0000000000..9c6db69b7a --- /dev/null +++ b/TelegramUI/RingBuffer.m @@ -0,0 +1,121 @@ +#import "RingBuffer.h" + +#include +#include +#include + +#define reportResult(result,operation) (_reportResult((result),(operation),strrchr(__FILE__, '/')+1,__LINE__)) +static inline bool _reportResult(kern_return_t result, const char *operation, const char* file, int line) { + if ( result != ERR_SUCCESS ) { + printf("%s:%d: %s: %s\n", file, line, operation, mach_error_string(result)); + return false; + } + return true; +} + +bool TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length) { + return _TPCircularBufferInit(buffer, length, sizeof(TPCircularBuffer)); +} + +bool _TPCircularBufferInit(TPCircularBuffer *buffer, int32_t length, size_t structSize) { + + assert(length > 0); + + if ( structSize != sizeof(TPCircularBuffer) ) { + fprintf(stderr, 
"TPCircularBuffer: Header version mismatch. Check for old versions of TPCircularBuffer in your project\n"); + abort(); + } + + // Keep trying until we get our buffer, needed to handle race conditions + int retries = 3; + while ( true ) { + + buffer->length = (int32_t)round_page(length); // We need whole page sizes + + // Temporarily allocate twice the length, so we have the contiguous address space to + // support a second instance of the buffer directly after + vm_address_t bufferAddress; + kern_return_t result = vm_allocate(mach_task_self(), + &bufferAddress, + buffer->length * 2, + VM_FLAGS_ANYWHERE); // allocate anywhere it'll fit + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Buffer allocation"); + return false; + } + // Try again if we fail + continue; + } + + // Now replace the second half of the allocation with a virtual copy of the first half. Deallocate the second half... + result = vm_deallocate(mach_task_self(), + bufferAddress + buffer->length, + buffer->length); + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Buffer deallocation"); + return false; + } + // If this fails somehow, deallocate the whole region and try again + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + // Re-map the buffer to the address space immediately after the buffer + vm_address_t virtualAddress = bufferAddress + buffer->length; + vm_prot_t cur_prot, max_prot; + result = vm_remap(mach_task_self(), + &virtualAddress, // mirror target + buffer->length, // size of mirror + 0, // auto alignment + 0, // force remapping to virtualAddress + mach_task_self(), // same task + bufferAddress, // mirror source + 0, // MAP READ-WRITE, NOT COPY + &cur_prot, // unused protection struct + &max_prot, // unused protection struct + VM_INHERIT_DEFAULT); + if ( result != ERR_SUCCESS ) { + if ( retries-- == 0 ) { + reportResult(result, "Remap buffer memory"); + return false; + } + // If this 
remap failed, we hit a race condition, so deallocate and try again + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + if ( virtualAddress != bufferAddress+buffer->length ) { + // If the memory is not contiguous, clean up both allocated buffers and try again + if ( retries-- == 0 ) { + printf("Couldn't map buffer memory to end of buffer\n"); + return false; + } + + vm_deallocate(mach_task_self(), virtualAddress, buffer->length); + vm_deallocate(mach_task_self(), bufferAddress, buffer->length); + continue; + } + + buffer->buffer = (void*)bufferAddress; + buffer->fillCount = 0; + buffer->head = buffer->tail = 0; + + return true; + } + return false; +} + +void TPCircularBufferCleanup(TPCircularBuffer *buffer) { + vm_deallocate(mach_task_self(), (vm_address_t)buffer->buffer, buffer->length * 2); + memset(buffer, 0, sizeof(TPCircularBuffer)); +} + +void TPCircularBufferClear(TPCircularBuffer *buffer) { + int32_t fillCount; + if ( TPCircularBufferTail(buffer, &fillCount) ) { + TPCircularBufferConsume(buffer, fillCount); + } +} + diff --git a/TelegramUI/RingByteBuffer.swift b/TelegramUI/RingByteBuffer.swift new file mode 100644 index 0000000000..1d48fbfd5e --- /dev/null +++ b/TelegramUI/RingByteBuffer.swift @@ -0,0 +1,70 @@ +import Foundation +import Darwin +import TelegramUIPrivateModule + +public final class RingByteBuffer { + public let size: Int + private var buffer: TPCircularBuffer + + public init(size: Int) { + self.size = size + self.buffer = TPCircularBuffer() + TPCircularBufferInit(&self.buffer, Int32(size)) + } + + deinit { + TPCircularBufferCleanup(&self.buffer) + } + + public func enqueue(data: Data) -> Bool { + return data.withUnsafeBytes { (bytes: UnsafePointer) -> Bool in + return TPCircularBufferProduceBytes(&self.buffer, UnsafeRawPointer(bytes), Int32(data.count)) + } + } + + public func enqueue(_ bytes: UnsafeRawPointer, count: Int) -> Bool { + return TPCircularBufferProduceBytes(&self.buffer, bytes, Int32(count)) + } + 
+ public func withMutableHeadBytes(_ f: (UnsafeMutableRawPointer, Int) -> Int) { + var availableBytes: Int32 = 0 + let bytes = TPCircularBufferHead(&self.buffer, &availableBytes) + let enqueuedBytes = f(bytes!, Int(availableBytes)) + TPCircularBufferProduce(&self.buffer, Int32(enqueuedBytes)) + } + + public func dequeue(_ bytes: UnsafeMutableRawPointer, count: Int) -> Int { + var availableBytes: Int32 = 0 + let tail = TPCircularBufferTail(&self.buffer, &availableBytes) + + let copiedCount = min(count, Int(availableBytes)) + memcpy(bytes, tail, copiedCount) + + TPCircularBufferConsume(&self.buffer, Int32(copiedCount)) + + return copiedCount + } + + public func dequeue(count: Int) -> Data { + var availableBytes: Int32 = 0 + let tail = TPCircularBufferTail(&self.buffer, &availableBytes) + + let copiedCount = min(count, Int(availableBytes)) + let bytes = malloc(copiedCount)! + memcpy(bytes, tail, copiedCount) + + TPCircularBufferConsume(&self.buffer, Int32(copiedCount)) + + return Data(bytesNoCopy: bytes.assumingMemoryBound(to: UInt8.self), count: copiedCount, deallocator: .free) + } + + public func clear() { + TPCircularBufferClear(&self.buffer) + } + + public var availableBytes: Int { + var count: Int32 = 0 + TPCircularBufferTail(&self.buffer, &count) + return Int(count) + } +} diff --git a/TelegramUI/SearchBarNode.swift b/TelegramUI/SearchBarNode.swift new file mode 100644 index 0000000000..bcb1f3947a --- /dev/null +++ b/TelegramUI/SearchBarNode.swift @@ -0,0 +1,198 @@ +import Foundation +import SwiftSignalKit +import UIKit +import AsyncDisplayKit +import Display + +private func generateBackground() -> UIImage? 
{ + let diameter: CGFloat = 8.0 + return generateImage(CGSize(width: diameter, height: diameter), contextGenerator: { size, context in + context.setFillColor(UIColor.white.cgColor) + context.fill(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(0xededed).cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: size)) + }, opaque: true)?.stretchableImage(withLeftCapWidth: Int(diameter / 2.0), topCapHeight: Int(diameter / 2.0)) +} + +private let searchBarBackground = generateBackground() + +private class SearchBarTextField: UITextField { + fileprivate let placeholderLabel: UILabel + private var placeholderLabelConstrainedSize: CGSize? + private var placeholderLabelSize: CGSize? + + override init(frame: CGRect) { + self.placeholderLabel = UILabel() + + super.init(frame: frame) + + self.addSubview(self.placeholderLabel) + } + + required init?(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + override func textRect(forBounds bounds: CGRect) -> CGRect { + return bounds.insetBy(dx: 4.0, dy: 4.0) + } + + override func editingRect(forBounds bounds: CGRect) -> CGRect { + return self.textRect(forBounds: bounds) + } + + override func layoutSubviews() { + super.layoutSubviews() + + let constrainedSize = self.textRect(forBounds: self.bounds).size + if self.placeholderLabelConstrainedSize != constrainedSize { + self.placeholderLabelConstrainedSize = constrainedSize + self.placeholderLabelSize = self.placeholderLabel.sizeThatFits(constrainedSize) + } + + if let placeholderLabelSize = self.placeholderLabelSize { + self.placeholderLabel.frame = CGRect(origin: self.textRect(forBounds: self.bounds).origin, size: placeholderLabelSize) + } + } +} + +class SearchBarNode: ASDisplayNode, UITextFieldDelegate { + var cancel: (() -> Void)? + var textUpdated: ((String) -> Void)? 
+ + private let backgroundNode: ASDisplayNode + private let separatorNode: ASDisplayNode + private let textBackgroundNode: ASImageNode + private let textField: SearchBarTextField + private let cancelButton: ASButtonNode + + var placeholderString: NSAttributedString? { + get { + return self.textField.placeholderLabel.attributedText + } set(value) { + self.textField.placeholderLabel.attributedText = value + } + } + + override init() { + self.backgroundNode = ASDisplayNode() + self.backgroundNode.isLayerBacked = true + self.backgroundNode.backgroundColor = UIColor.white + + self.separatorNode = ASDisplayNode() + self.separatorNode.isLayerBacked = true + self.separatorNode.backgroundColor = UIColor(0xc8c7cc) + + self.textBackgroundNode = ASImageNode() + self.textBackgroundNode.isLayerBacked = false + self.textBackgroundNode.displaysAsynchronously = false + self.textBackgroundNode.displayWithoutProcessing = true + self.textBackgroundNode.image = searchBarBackground + + self.textField = SearchBarTextField() + self.textField.font = Font.regular(15.0) + self.textField.returnKeyType = .done + + self.cancelButton = ASButtonNode() + self.cancelButton.hitTestSlop = UIEdgeInsets(top: -8.0, left: -8.0, bottom: -8.0, right: -8.0) + self.cancelButton.setAttributedTitle(NSAttributedString(string: "Cancel", font: Font.regular(17.0), textColor: UIColor(0x1195f2)), for: []) + self.cancelButton.displaysAsynchronously = false + + super.init() + + self.addSubnode(self.backgroundNode) + self.addSubnode(self.separatorNode) + + self.backgroundColor = UIColor.white.withAlphaComponent(0.5) + self.addSubnode(self.textBackgroundNode) + self.view.addSubview(self.textField) + self.addSubnode(self.cancelButton) + + self.textField.delegate = self + self.textField.addTarget(self, action: #selector(self.textFieldDidChange(_:)), for: .editingChanged) + + self.cancelButton.addTarget(self, action: #selector(self.cancelPressed), forControlEvents: .touchUpInside) + } + + override func layout() { + 
self.backgroundNode.frame = self.bounds + self.separatorNode.frame = CGRect(origin: CGPoint(x: 0.0, y: self.bounds.size.height), size: CGSize(width: self.bounds.size.width, height: UIScreenPixel)) + + let cancelButtonSize = self.cancelButton.measure(CGSize(width: 100.0, height: CGFloat.infinity)) + self.cancelButton.frame = CGRect(origin: CGPoint(x: self.bounds.size.width - 8.0 - cancelButtonSize.width, y: 20.0 + 10.0), size: cancelButtonSize) + + self.textBackgroundNode.frame = CGRect(origin: CGPoint(x: 8.0, y: 20.0 + 8.0), size: CGSize(width: self.bounds.size.width - 16.0 - cancelButtonSize.width - 10.0, height: 28.0)) + + self.textField.frame = self.textBackgroundNode.frame + } + + @objc private func tapGesture(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + if let cancel = self.cancel { + cancel() + } + } + } + + func activate() { + self.textField.becomeFirstResponder() + } + + func animateIn(from node: SearchBarPlaceholderNode, duration: Double, timingFunction: String) { + let initialTextBackgroundFrame = node.convert(node.backgroundNode.frame, to: self) + + let initialBackgroundFrame = CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: CGSize(width: self.bounds.size.width, height: max(0.0, initialTextBackgroundFrame.maxY + 8.0))) + self.backgroundNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: duration) + self.backgroundNode.layer.animateFrame(from: initialBackgroundFrame, to: self.backgroundNode.frame, duration: duration, timingFunction: timingFunction) + + let initialSeparatorFrame = CGRect(origin: CGPoint(x: 0.0, y: max(0.0, initialTextBackgroundFrame.maxY + 8.0)), size: CGSize(width: self.bounds.size.width, height: UIScreenPixel)) + self.separatorNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: duration) + self.separatorNode.layer.animateFrame(from: initialSeparatorFrame, to: self.separatorNode.frame, duration: duration, timingFunction: timingFunction) + + self.textBackgroundNode.layer.animateFrame(from: 
initialTextBackgroundFrame, to: self.textBackgroundNode.frame, duration: duration, timingFunction: timingFunction) + + let textFieldFrame = self.textField.frame + let initialLabelNodeFrame = CGRect(origin: node.labelNode.frame.offsetBy(dx: initialTextBackgroundFrame.origin.x - 4.0, dy: initialTextBackgroundFrame.origin.y - 6.0).origin, size: textFieldFrame.size) + self.textField.layer.animateFrame(from: initialLabelNodeFrame, to: self.textField.frame, duration: duration, timingFunction: timingFunction) + + let cancelButtonFrame = self.cancelButton.frame + self.cancelButton.layer.animatePosition(from: CGPoint(x: self.bounds.size.width + cancelButtonFrame.size.width / 2.0, y: initialTextBackgroundFrame.minY + 2.0 + cancelButtonFrame.size.height / 2.0), to: self.cancelButton.layer.position, duration: duration, timingFunction: timingFunction) + node.isHidden = true + } + + func deactivate() { + self.textField.resignFirstResponder() + self.textField.text = nil + self.textField.placeholderLabel.isHidden = false + } + + func animateOut(to node: SearchBarPlaceholderNode, duration: Double, timingFunction: String, completion: () -> Void) { + node.isHidden = false + completion() + } + + func textField(_ textField: UITextField, shouldChangeCharactersIn range: NSRange, replacementString string: String) -> Bool { + if string.range(of: "\n") != nil { + return false + } + return true + } + + func textFieldShouldReturn(_ textField: UITextField) -> Bool { + self.textField.resignFirstResponder() + return false + } + + @objc func textFieldDidChange(_ textField: UITextField) { + self.textField.placeholderLabel.isHidden = !(textField.text?.isEmpty ?? true) + if let textUpdated = self.textUpdated { + textUpdated(textField.text ?? 
"") + } + } + + @objc func cancelPressed() { + if let cancel = self.cancel { + cancel() + } + } +} diff --git a/TelegramUI/SearchBarPlaceholderNode.swift b/TelegramUI/SearchBarPlaceholderNode.swift new file mode 100644 index 0000000000..f430dc39f6 --- /dev/null +++ b/TelegramUI/SearchBarPlaceholderNode.swift @@ -0,0 +1,91 @@ +import Foundation +import SwiftSignalKit +import UIKit +import AsyncDisplayKit +import Display + +private func generateBackground() -> UIImage? { + let diameter: CGFloat = 8.0 + return generateImage(CGSize(width: diameter, height: diameter), contextGenerator: { size, context in + context.setFillColor(UIColor.white.cgColor) + context.fill(CGRect(origin: CGPoint(), size: size)) + context.setFillColor(UIColor(0xededed).cgColor) + context.fillEllipse(in: CGRect(origin: CGPoint(), size: size)) + }, opaque: true)?.stretchableImage(withLeftCapWidth: Int(diameter / 2.0), topCapHeight: Int(diameter / 2.0)) +} + +private let searchBarBackground = generateBackground() + +private class SearchBarPlaceholderNodeLayer: CALayer { +} + +private class SearchBarPlaceholderNodeView: UIView { + override static var layerClass: AnyClass { + return SearchBarPlaceholderNodeLayer.self + } +} + +class SearchBarPlaceholderNode: ASDisplayNode, ASEditableTextNodeDelegate { + var activate: (() -> Void)? + + let backgroundNode: ASImageNode + let labelNode: TextNode + + var placeholderString: NSAttributedString? 
+ + override init() { + self.backgroundNode = ASImageNode() + self.backgroundNode.isLayerBacked = false + self.backgroundNode.displaysAsynchronously = false + self.backgroundNode.displayWithoutProcessing = true + self.backgroundNode.image = searchBarBackground + + self.labelNode = TextNode() + self.labelNode.isOpaque = true + self.labelNode.isLayerBacked = true + self.labelNode.backgroundColor = UIColor(0xededed) + + super.init() + /*super.init(viewBlock: { + return SearchBarPlaceholderNodeView() + }, didLoad: nil)*/ + + self.addSubnode(self.backgroundNode) + self.addSubnode(self.labelNode) + + self.backgroundNode.isUserInteractionEnabled = true + } + + override func didLoad() { + super.didLoad() + + self.backgroundNode.view.addGestureRecognizer(UITapGestureRecognizer(target: self, action: #selector(backgroundTap(_:)))) + } + + func asyncLayout() -> (_ placeholderString: NSAttributedString?, _ constrainedSize: CGSize) -> (() -> Void) { + let labelLayout = TextNode.asyncLayout(self.labelNode) + + return { placeholderString, constrainedSize in + let (labelLayoutResult, labelApply) = labelLayout(placeholderString, UIColor(0xededed), 1, .end, constrainedSize, nil) + + return { [weak self] in + if let strongSelf = self { + let _ = labelApply() + + strongSelf.placeholderString = placeholderString + + strongSelf.labelNode.frame = CGRect(origin: CGPoint(x: floor((constrainedSize.width - labelLayoutResult.size.width) / 2.0), y: floor((28.0 - labelLayoutResult.size.height) / 2.0) + 2.0), size: labelLayoutResult.size) + strongSelf.backgroundNode.frame = CGRect(origin: CGPoint(), size: CGSize(width: constrainedSize.width, height: 28.0)) + } + } + } + } + + @objc private func backgroundTap(_ recognizer: UITapGestureRecognizer) { + if case .ended = recognizer.state { + if let activate = self.activate { + activate() + } + } + } +} diff --git a/TelegramUI/SearchDisplayController.swift b/TelegramUI/SearchDisplayController.swift new file mode 100644 index 0000000000..2046d9d65a --- 
/dev/null +++ b/TelegramUI/SearchDisplayController.swift @@ -0,0 +1,70 @@ +import Foundation +import AsyncDisplayKit +import SwiftSignalKit +import Display + +final class SearchDisplayController { + private let searchBar: SearchBarNode + private let contentNode: SearchDisplayControllerContentNode + + private var containerLayout: (ContainerViewLayout, CGFloat)? + + init(contentNode: SearchDisplayControllerContentNode, cancel: @escaping () -> Void) { + self.searchBar = SearchBarNode() + self.contentNode = contentNode + + self.searchBar.textUpdated = { [weak contentNode] text in + contentNode?.searchTextUpdated(text: text) + } + self.searchBar.cancel = cancel + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + let searchBarFrame = CGRect(origin: CGPoint(x: 0.0, y: (layout.statusBarHeight ?? 0.0) - 20.0), size: CGSize(width: layout.size.width, height: 64.0)) + transition.updateFrame(node: self.searchBar, frame: searchBarFrame) + + self.containerLayout = (layout, searchBarFrame.maxY) + + transition.updateFrame(node: self.contentNode, frame: CGRect(origin: CGPoint(), size: layout.size)) + self.contentNode.containerLayoutUpdated(ContainerViewLayout(size: layout.size, intrinsicInsets: layout.intrinsicInsets, statusBarHeight: nil, inputHeight: layout.inputHeight), navigationBarHeight: searchBarFrame.maxY, transition: transition) + } + + func activate(insertSubnode: (ASDisplayNode) -> Void, placeholder: SearchBarPlaceholderNode) { + guard let (layout, navigationBarHeight) = self.containerLayout else { + return + } + + insertSubnode(self.contentNode) + + self.contentNode.frame = CGRect(origin: CGPoint(), size: layout.size) + self.contentNode.containerLayoutUpdated(ContainerViewLayout(size: layout.size, intrinsicInsets: UIEdgeInsets(), statusBarHeight: nil, inputHeight: nil), navigationBarHeight: navigationBarHeight, transition: .immediate) + + let initialTextBackgroundFrame = 
placeholder.convert(placeholder.backgroundNode.frame, to: self.contentNode.supernode) + + let contentNodePosition = self.contentNode.layer.position + self.contentNode.layer.animatePosition(from: CGPoint(x: contentNodePosition.x, y: contentNodePosition.y + (initialTextBackgroundFrame.maxY + 8.0 - navigationBarHeight)), to: contentNodePosition, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring) + self.contentNode.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.3, timingFunction: kCAMediaTimingFunctionEaseOut) + + self.searchBar.placeholderString = placeholder.placeholderString + self.searchBar.frame = CGRect(origin: CGPoint(x: 0.0, y: (layout.statusBarHeight ?? 0.0) - 20.0), size: CGSize(width: layout.size.width, height: 64.0)) + insertSubnode(searchBar) + self.searchBar.layout() + + self.searchBar.activate() + self.searchBar.animateIn(from: placeholder, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring) + } + + func deactivate(placeholder: SearchBarPlaceholderNode?) 
{ + searchBar.deactivate() + + if let placeholder = placeholder { + searchBar.animateOut(to: placeholder, duration: 0.5, timingFunction: kCAMediaTimingFunctionSpring, completion: { + self.searchBar.removeFromSupernode() + }) + } + + self.contentNode.layer.animateAlpha(from: 1.0, to: 0.0, duration: 0.3, removeOnCompletion: false, completion: { _ in + self.contentNode.removeFromSupernode() + }) + } +} diff --git a/TelegramUI/SearchDisplayControllerContentNode.swift b/TelegramUI/SearchDisplayControllerContentNode.swift new file mode 100644 index 0000000000..9ab0ba6833 --- /dev/null +++ b/TelegramUI/SearchDisplayControllerContentNode.swift @@ -0,0 +1,24 @@ +import Foundation +import AsyncDisplayKit +import Display +import SwiftSignalKit + +class SearchDisplayControllerContentNode: ASDisplayNode { + override init() { + super.init() + + self.backgroundColor = UIColor.white + } + + func searchTextUpdated(text: String) { + + } + + func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + + } + + func ready() -> Signal { + return .single(Void()) + } +} diff --git a/TelegramUI/SettingsAccountInfoItem.swift b/TelegramUI/SettingsAccountInfoItem.swift new file mode 100644 index 0000000000..014a0f3ca0 --- /dev/null +++ b/TelegramUI/SettingsAccountInfoItem.swift @@ -0,0 +1,134 @@ +import Foundation +import Display +import SwiftSignalKit +import AsyncDisplayKit +import Postbox +import TelegramCore + +class SettingsAccountInfoItem: ListControllerGroupableItem { + let account: Account + let peer: Peer? 
+ let connectionStatus: ConnectionStatus + + init(account: Account, peer: Peer?, connectionStatus: ConnectionStatus) { + self.account = account + self.peer = peer + self.connectionStatus = connectionStatus + } + + func setupNode(async: @escaping (@escaping () -> Void) -> Void, completion: @escaping (ListControllerGroupableItemNode) -> Void) { + async { + let node = SettingsAccountInfoItemNode() + completion(node) + } + } +} + +private let nameFont = Font.medium(19.0) +private let statusFont = Font.regular(15.0) + +class SettingsAccountInfoItemNode: ListControllerGroupableItemNode { + let avatarNode: ChatListAvatarNode + + let nameNode: TextNode + let statusNode: TextNode + + override init() { + self.avatarNode = ChatListAvatarNode(font: Font.regular(20.0)) + + self.nameNode = TextNode() + self.nameNode.isLayerBacked = true + self.nameNode.contentMode = .left + self.nameNode.contentsScale = UIScreen.main.scale + + self.statusNode = TextNode() + self.statusNode.isLayerBacked = true + self.statusNode.contentMode = .left + self.statusNode.contentsScale = UIScreen.main.scale + + super.init() + + self.addSubnode(self.avatarNode) + self.addSubnode(self.nameNode) + self.addSubnode(self.statusNode) + } + + deinit { + } + + override func asyncLayoutContent() -> (_ item: ListControllerGroupableItem, _ width: CGFloat) -> (CGSize, () -> Void) { + let layoutNameNode = TextNode.asyncLayout(self.nameNode) + let layoutStatusNode = TextNode.asyncLayout(self.statusNode) + + return { item, width in + if let item = item as? SettingsAccountInfoItem { + let (nameNodeLayout, nameNodeApply) = layoutNameNode(NSAttributedString(string: item.peer?.displayTitle ?? 
"", font: nameFont, textColor: UIColor.black), nil, 1, .end, CGSize(width: width - 20, height: CGFloat.greatestFiniteMagnitude), nil) + + let statusText: String + let statusColor: UIColor + switch item.connectionStatus { + case .WaitingForNetwork: + statusText = "waiting for network" + statusColor = UIColor(0xb3b3b3) + case .Connecting: + statusText = "connecting" + statusColor = UIColor(0xb3b3b3) + case .Updating: + statusText = "updating" + statusColor = UIColor(0xb3b3b3) + case .Online: + statusText = "online" + statusColor = UIColor.blue + } + + let (statusNodeLayout, statusNodeApply) = layoutStatusNode(NSAttributedString(string: statusText, font: statusFont, textColor: statusColor), nil, 1, .end, CGSize(width: width - 20, height: CGFloat.greatestFiniteMagnitude), nil) + + return (CGSize(width: width, height: 97.0), { [weak self] in + if let strongSelf = self { + let _ = nameNodeApply() + let _ = statusNodeApply() + + if let peer = item.peer { + strongSelf.avatarNode.setPeer(account: item.account, peer: peer) + } + + strongSelf.avatarNode.frame = CGRect(origin: CGPoint(x: 15.0, y: 15.0), size: CGSize(width: 66.0, height: 66.0)) + strongSelf.nameNode.frame = CGRect(origin: CGPoint(x: 94.0, y: 25.0), size: nameNodeLayout.size) + + + strongSelf.statusNode.frame = CGRect(origin: CGPoint(x: 94.0, y: 25.0 + nameNodeLayout.size.height + 4.0), size: statusNodeLayout.size) + } + }) + } else { + return (CGSize(width: width, height: 0.0), { + }) + } + } + } + + func setupWithAccount1(account: Account, peer: Peer?) 
{ + /*self.peerDisposable.set((account.postbox.peerWithId(account.peerId) + |> deliverOnMainQueue).start(next: {[weak self] peer in + if let strongSelf = self { + strongSelf.avatarNode.setPeer(account, peer: peer) + let width = strongSelf.bounds.size.width + if width > CGFloat(FLT_EPSILON) { + strongSelf.layoutContentForWidth(width) + strongSelf.nameNode.setNeedsDisplay() + } + } + })) + self.connectingStatusDisposable.set((account.network.connectionStatus + |> deliverOnMainQueue).start(next: { [weak self] status in + if let strongSelf = self { + + //strongSelf.statusNode.attributedString = NSAttributedString(string: statusText, font: statusFont, textColor: statusColor) + let width = strongSelf.bounds.size.width + if width > CGFloat(FLT_EPSILON) { + strongSelf.layoutContentForWidth(width) + strongSelf.statusNode.setNeedsDisplay() + } + } + }))*/ + } +} diff --git a/TelegramUI/SettingsController.swift b/TelegramUI/SettingsController.swift new file mode 100644 index 0000000000..aa5733e01a --- /dev/null +++ b/TelegramUI/SettingsController.swift @@ -0,0 +1,69 @@ +import Foundation +import Display +import Postbox +import SwiftSignalKit +import TelegramCore + +public class SettingsController: ListController { + private let account: Account + + private let peer = Promise() + private let connectionStatus = Promise(.Online) + private let peerAndConnectionStatusDisposable = MetaDisposable() + + public init(account: Account) { + self.account = account + + super.init() + + self.title = "Settings" + self.tabBarItem.title = "Settings" + self.tabBarItem.image = UIImage(bundleImageName: "Chat List/Tabs/IconSettings")?.precomposed() + self.tabBarItem.selectedImage = UIImage(bundleImageName: "Chat List/Tabs/IconSettingsSelected")?.precomposed() + + let deselectAction = { [weak self] () -> Void in + self?.listDisplayNode.listView.clearHighlightAnimated(true) + } + + self.items = [ + SettingsAccountInfoItem(account: account, peer: nil, connectionStatus: .Online), + 
ListControllerButtonItem(title: "Set Profile Photo", action: deselectAction), + ListControllerSpacerItem(height: 35.0), + ListControllerDisclosureActionItem(title: "Notifications and Sounds", action: deselectAction), + ListControllerDisclosureActionItem(title: "Privacy and Security", action: deselectAction), + ListControllerDisclosureActionItem(title: "Chat Settings", action: deselectAction), + //SettingsWallpaperListItem(), + ListControllerSpacerItem(height: 35.0), + ListControllerDisclosureActionItem(title: "Phone Number", action: deselectAction), + ListControllerDisclosureActionItem(title: "Username", action: deselectAction), + ListControllerSpacerItem(height: 35.0), + ListControllerDisclosureActionItem(title: "Ask a Question", action: deselectAction), + ListControllerDisclosureActionItem(title: "Telegram FAQ", action: deselectAction), + ListControllerSpacerItem(height: 35.0), + ListControllerButtonItem(title: "Logout", action: { }, color: UIColor.red), + ListControllerSpacerItem(height: 35.0) + ] + + let peerAndConnectionStatus = combineLatest(peer.get(), connectionStatus.get()) |> deliverOn(Queue.mainQueue()) |> afterNext { [weak self] peer, connectionStatus in + if let strongSelf = self { + let item = SettingsAccountInfoItem(account: account, peer: peer, connectionStatus: connectionStatus) + strongSelf.items[0] = item + if strongSelf.isNodeLoaded { + strongSelf.listDisplayNode.listView.deleteAndInsertItems(deleteIndices: [ListViewDeleteItem(index: 0, directionHint: nil)], insertIndicesAndItems: [ListViewInsertItem(index: 0, previousIndex: 0, item: item, directionHint: .Down)], updateIndicesAndItems: [], options: [.AnimateInsertion]) + } + } + } + peerAndConnectionStatusDisposable.set(peerAndConnectionStatus.start()) + + peer.set(account.postbox.peerWithId(account.peerId)) + connectionStatus.set(account.network.connectionStatus) + } + + required public init(coder aDecoder: NSCoder) { + fatalError("init(coder:) has not been implemented") + } + + deinit { + 
peerAndConnectionStatusDisposable.dispose() + } +} diff --git a/TelegramUI/StickerResources.swift b/TelegramUI/StickerResources.swift new file mode 100644 index 0000000000..31b71bdca8 --- /dev/null +++ b/TelegramUI/StickerResources.swift @@ -0,0 +1,80 @@ +import Foundation +import Postbox +import SwiftSignalKit +import Display +import TelegramUIPrivateModule +import TelegramCore + +private func chatMessageStickerDatas(account: Account, file: TelegramMediaFile) -> Signal<(Data?, Data?, Int), NoError> { + let fullSizeResource = fileResource(file) + let maybeFetched = account.postbox.mediaBox.resourceData(fullSizeResource, complete: true) + + return maybeFetched |> take(1) |> mapToSignal { maybeData in + if maybeData.size >= fullSizeResource.size { + let loadedData: Data? = try? Data(contentsOf: URL(fileURLWithPath: maybeData.path), options: []) + + return .single((nil, loadedData, fullSizeResource.size)) + } else { + let fullSizeData = account.postbox.mediaBox.resourceData(fullSizeResource, complete: true) |> map { next in + return next.size == 0 ? nil : try? Data(contentsOf: URL(fileURLWithPath: next.path), options: .mappedIfSafe) + } + + return fullSizeData |> map { data -> (Data?, Data?, Int) in + return (nil, data, fullSizeResource.size) + } + } + } +} + +func chatMessageSticker(account: Account, file: TelegramMediaFile) -> Signal<(TransformImageArguments) -> DrawingContext, NoError> { + let signal = chatMessageStickerDatas(account: account, file: file) + + return signal |> map { (thumbnailData, fullSizeData, fullTotalSize) in + return { arguments in + assertNotOnMainThread() + let context = DrawingContext(size: arguments.drawingSize, clear: true) + + var fullSizeImage: UIImage? + if let fullSizeData = fullSizeData { + if fullSizeData.count >= fullTotalSize { + if let image = UIImage.convert(fromWebP: fullSizeData) { + fullSizeImage = image + } + } else { + } + } + + let thumbnailImage: CGImage? = nil + + var blurredThumbnailImage: UIImage? 
+ if let thumbnailImage = thumbnailImage { + let thumbnailSize = CGSize(width: thumbnailImage.width, height: thumbnailImage.height) + let thumbnailContextSize = thumbnailSize.aspectFitted(CGSize(width: 150.0, height: 150.0)) + let thumbnailContext = DrawingContext(size: thumbnailContextSize, scale: 1.0) + thumbnailContext.withFlippedContext { c in + c.interpolationQuality = .none + c.draw(thumbnailImage, in: CGRect(origin: CGPoint(), size: thumbnailContextSize)) + } + telegramFastBlur(Int32(thumbnailContextSize.width), Int32(thumbnailContextSize.height), Int32(thumbnailContext.bytesPerRow), thumbnailContext.bytes) + + blurredThumbnailImage = thumbnailContext.generateImage() + } + + context.withFlippedContext { c in + c.setBlendMode(.copy) + if let blurredThumbnailImage = blurredThumbnailImage { + c.interpolationQuality = .low + c.draw(blurredThumbnailImage.cgImage!, in: arguments.drawingRect) + } + + if let fullSizeImage = fullSizeImage, let cgImage = fullSizeImage.cgImage { + c.setBlendMode(.normal) + c.interpolationQuality = .medium + c.draw(cgImage, in: arguments.drawingRect) + } + } + + return context + } + } +} diff --git a/TelegramUI/TelegramUIIncludes.h b/TelegramUI/TelegramUIIncludes.h new file mode 100644 index 0000000000..94e9f9bb75 --- /dev/null +++ b/TelegramUI/TelegramUIIncludes.h @@ -0,0 +1,5 @@ +#ifndef TelegramUIIncludes_h +#define TelegramUIIncludes_h + + +#endif diff --git a/TelegramUI/TelegramUIPrivate/module.modulemap b/TelegramUI/TelegramUIPrivate/module.modulemap new file mode 100644 index 0000000000..7e3eadc98c --- /dev/null +++ b/TelegramUI/TelegramUIPrivate/module.modulemap @@ -0,0 +1,13 @@ +module TelegramUIPrivateModule { + header "../../third-party/FFmpeg-iOS/include/libavcodec/avcodec.h" + header "../../third-party/FFmpeg-iOS/include/libavformat/avformat.h" + header "../../third-party/FFmpeg-iOS/include/libavformat/avio.h" + header "../../third-party/FFmpeg-iOS/include/libavutil/avutil.h" + header 
"../../third-party/FFmpeg-iOS/include/libavutil/pixdesc.h" + header "../../third-party/FFmpeg-iOS/include/libswresample/swresample.h" + header "../FFMpegSwResample.h" + header "../FastBlur.h" + header "../UIImage+WebP.h" + header "../RingBuffer.h" + header "../TelegramUIIncludes.h" +} diff --git a/TelegramUI/TextNode.swift b/TelegramUI/TextNode.swift new file mode 100644 index 0000000000..7ca4e29a5c --- /dev/null +++ b/TelegramUI/TextNode.swift @@ -0,0 +1,293 @@ +import Foundation +import AsyncDisplayKit +import Display + +private let defaultFont = UIFont.systemFont(ofSize: 15.0) + +private final class TextNodeLine { + let line: CTLine + let frame: CGRect + + init(line: CTLine, frame: CGRect) { + self.line = line + self.frame = frame + } +} + +enum TextNodeCutoutPosition { + case TopLeft + case TopRight +} + +struct TextNodeCutout: Equatable { + let position: TextNodeCutoutPosition + let size: CGSize +} + +func ==(lhs: TextNodeCutout, rhs: TextNodeCutout) -> Bool { + return lhs.position == rhs.position && lhs.size == rhs.size +} + +final class TextNodeLayout: NSObject { + fileprivate let attributedString: NSAttributedString? + fileprivate let maximumNumberOfLines: Int + fileprivate let truncationType: CTLineTruncationType + fileprivate let backgroundColor: UIColor? + fileprivate let constrainedSize: CGSize + fileprivate let cutout: TextNodeCutout? + let size: CGSize + fileprivate let lines: [TextNodeLine] + + fileprivate init(attributedString: NSAttributedString?, maximumNumberOfLines: Int, truncationType: CTLineTruncationType, constrainedSize: CGSize, cutout: TextNodeCutout?, size: CGSize, lines: [TextNodeLine], backgroundColor: UIColor?) 
{ + self.attributedString = attributedString + self.maximumNumberOfLines = maximumNumberOfLines + self.truncationType = truncationType + self.constrainedSize = constrainedSize + self.cutout = cutout + self.size = size + self.lines = lines + self.backgroundColor = backgroundColor + } + + var numberOfLines: Int { + return self.lines.count + } + + var trailingLineWidth: CGFloat { + if let lastLine = self.lines.last { + return lastLine.frame.width + } else { + return 0.0 + } + } +} + +final class TextNode: ASDisplayNode { + private var cachedLayout: TextNodeLayout? + + override init() { + super.init() + + self.backgroundColor = UIColor.clear + self.isOpaque = false + self.clipsToBounds = false + } + + private class func calculateLayout(attributedString: NSAttributedString?, maximumNumberOfLines: Int, truncationType: CTLineTruncationType, backgroundColor: UIColor?, constrainedSize: CGSize, cutout: TextNodeCutout?) -> TextNodeLayout { + if let attributedString = attributedString { + let font: CTFont + if attributedString.length != 0 { + if let stringFont = attributedString.attribute(kCTFontAttributeName as String, at: 0, effectiveRange: nil) { + font = stringFont as! CTFont + } else { + font = defaultFont + } + } else { + font = defaultFont + } + + let fontAscent = CTFontGetAscent(font) + let fontDescent = CTFontGetDescent(font) + let fontLineHeight = floor(fontAscent + fontDescent) + let fontLineSpacing = floor(fontLineHeight * 0.12) + + var lines: [TextNodeLine] = [] + + var maybeTypesetter: CTTypesetter? + maybeTypesetter = CTTypesetterCreateWithAttributedString(attributedString as CFAttributedString) + if maybeTypesetter == nil { + return TextNodeLayout(attributedString: attributedString, maximumNumberOfLines: maximumNumberOfLines, truncationType: truncationType, constrainedSize: constrainedSize, cutout: cutout, size: CGSize(), lines: [], backgroundColor: backgroundColor) + } + + let typesetter = maybeTypesetter! 
+ + var lastLineCharacterIndex: CFIndex = 0 + var layoutSize = CGSize() + + var cutoutEnabled = false + var cutoutMinY: CGFloat = 0.0 + var cutoutMaxY: CGFloat = 0.0 + var cutoutWidth: CGFloat = 0.0 + var cutoutOffset: CGFloat = 0.0 + if let cutout = cutout { + cutoutMinY = -fontLineSpacing + cutoutMaxY = cutout.size.height + fontLineSpacing + cutoutWidth = cutout.size.width + if case .TopLeft = cutout.position { + cutoutOffset = cutoutWidth + } + cutoutEnabled = true + } + + var first = true + while true { + var lineConstrainedWidth = constrainedSize.width + var lineOriginY = floorToScreenPixels(layoutSize.height + fontLineHeight - fontLineSpacing * 2.0) + if !first { + lineOriginY += fontLineSpacing + } + var lineCutoutOffset: CGFloat = 0.0 + var lineAdditionalWidth: CGFloat = 0.0 + + if cutoutEnabled { + if lineOriginY < cutoutMaxY && lineOriginY + fontLineHeight > cutoutMinY { + lineConstrainedWidth = max(1.0, lineConstrainedWidth - cutoutWidth) + lineCutoutOffset = cutoutOffset + lineAdditionalWidth = cutoutWidth + } + } + + let lineCharacterCount = CTTypesetterSuggestLineBreak(typesetter, lastLineCharacterIndex, Double(lineConstrainedWidth)) + + if maximumNumberOfLines != 0 && lines.count == maximumNumberOfLines - 1 && lineCharacterCount > 0 { + if first { + first = false + } else { + layoutSize.height += fontLineSpacing + } + + let coreTextLine: CTLine + + let originalLine = CTTypesetterCreateLineWithOffset(typesetter, CFRange(location: lastLineCharacterIndex, length: attributedString.length - lastLineCharacterIndex), 0.0) + + if CTLineGetTypographicBounds(originalLine, nil, nil, nil) - CTLineGetTrailingWhitespaceWidth(originalLine) < Double(constrainedSize.width) { + coreTextLine = originalLine + } else { + var truncationTokenAttributes: [String : AnyObject] = [:] + truncationTokenAttributes[kCTFontAttributeName as String] = font + truncationTokenAttributes[kCTForegroundColorFromContextAttributeName as String] = true as NSNumber + let tokenString = 
"\u{2026}" + let truncatedTokenString = NSAttributedString(string: tokenString, attributes: truncationTokenAttributes) + let truncationToken = CTLineCreateWithAttributedString(truncatedTokenString) + + coreTextLine = CTLineCreateTruncatedLine(originalLine, Double(constrainedSize.width), truncationType, truncationToken) ?? truncationToken + } + + let lineWidth = ceil(CGFloat(CTLineGetTypographicBounds(coreTextLine, nil, nil, nil) - CTLineGetTrailingWhitespaceWidth(coreTextLine))) + let lineFrame = CGRect(x: lineCutoutOffset, y: lineOriginY, width: lineWidth, height: fontLineHeight) + layoutSize.height += fontLineHeight + fontLineSpacing + layoutSize.width = max(layoutSize.width, lineWidth + lineAdditionalWidth) + + lines.append(TextNodeLine(line: coreTextLine, frame: lineFrame)) + + break + } else { + if lineCharacterCount > 0 { + if first { + first = false + } else { + layoutSize.height += fontLineSpacing + } + + let coreTextLine = CTTypesetterCreateLineWithOffset(typesetter, CFRangeMake(lastLineCharacterIndex, lineCharacterCount), 100.0) + lastLineCharacterIndex += lineCharacterCount + + let lineWidth = ceil(CGFloat(CTLineGetTypographicBounds(coreTextLine, nil, nil, nil) - CTLineGetTrailingWhitespaceWidth(coreTextLine))) + let lineFrame = CGRect(x: lineCutoutOffset, y: lineOriginY, width: lineWidth, height: fontLineHeight) + layoutSize.height += fontLineHeight + layoutSize.width = max(layoutSize.width, lineWidth + lineAdditionalWidth) + + lines.append(TextNodeLine(line: coreTextLine, frame: lineFrame)) + } else { + if !lines.isEmpty { + layoutSize.height += fontLineSpacing + } + break + } + } + } + + return TextNodeLayout(attributedString: attributedString, maximumNumberOfLines: maximumNumberOfLines, truncationType: truncationType, constrainedSize: constrainedSize, cutout: cutout, size: CGSize(width: ceil(layoutSize.width), height: ceil(layoutSize.height)), lines: lines, backgroundColor: backgroundColor) + } else { + return TextNodeLayout(attributedString: 
attributedString, maximumNumberOfLines: maximumNumberOfLines, truncationType: truncationType, constrainedSize: constrainedSize, cutout: cutout, size: CGSize(), lines: [], backgroundColor: backgroundColor) + } + } + + override func drawParameters(forAsyncLayer layer: _ASDisplayLayer) -> NSObjectProtocol? { + return self.cachedLayout + } + + @objc override class func draw(_ bounds: CGRect, withParameters parameters: NSObjectProtocol!, isCancelled: asdisplaynode_iscancelled_block_t, isRasterizing: Bool) { + let context = UIGraphicsGetCurrentContext()! + + context.setAllowsAntialiasing(true) + + context.setAllowsFontSmoothing(false) + context.setShouldSmoothFonts(false) + + context.setAllowsFontSubpixelPositioning(false) + context.setShouldSubpixelPositionFonts(false) + + context.setAllowsFontSubpixelQuantization(true) + context.setShouldSubpixelQuantizeFonts(true) + + if let layout = parameters as? TextNodeLayout { + if !isRasterizing || layout.backgroundColor != nil { + context.setBlendMode(.copy) + context.setFillColor((layout.backgroundColor ?? UIColor.clear).cgColor) + context.fill(bounds) + } + + let textMatrix = context.textMatrix + let textPosition = context.textPosition + //CGContextSaveGState(context) + + context.textMatrix = CGAffineTransform(scaleX: 1.0, y: -1.0) + + //let clipRect = CGContextGetClipBoundingBox(context) + + for i in 0 ..< layout.lines.count { + let line = layout.lines[i] + context.textPosition = CGPoint(x: line.frame.origin.x, y: line.frame.origin.y) + CTLineDraw(line.line, context) + } + + //CGContextRestoreGState(context) + context.textMatrix = textMatrix + context.textPosition = CGPoint(x: textPosition.x, y: textPosition.y) + } + + context.setBlendMode(.normal) + } + + class func asyncLayout(_ maybeNode: TextNode?) -> (_ attributedString: NSAttributedString?, _ backgroundColor: UIColor?, _ maximumNumberOfLines: Int, _ truncationType: CTLineTruncationType, _ constrainedSize: CGSize, _ cutout: TextNodeCutout?) 
-> (TextNodeLayout, () -> TextNode) { + let existingLayout: TextNodeLayout? = maybeNode?.cachedLayout + + return { attributedString, backgroundColor, maximumNumberOfLines, truncationType, constrainedSize, cutout in + let layout: TextNodeLayout + + var updated = false + if let existingLayout = existingLayout, existingLayout.constrainedSize == constrainedSize && existingLayout.maximumNumberOfLines == maximumNumberOfLines && existingLayout.truncationType == truncationType && existingLayout.cutout == cutout { + let stringMatch: Bool + if let existingString = existingLayout.attributedString, let string = attributedString { + stringMatch = existingString.isEqual(to: string) + } else if existingLayout.attributedString == nil && attributedString == nil { + stringMatch = true + } else { + stringMatch = false + } + + if stringMatch { + layout = existingLayout + } else { + layout = TextNode.calculateLayout(attributedString: attributedString, maximumNumberOfLines: maximumNumberOfLines, truncationType: truncationType, backgroundColor: backgroundColor, constrainedSize: constrainedSize, cutout: cutout) + updated = true + } + } else { + layout = TextNode.calculateLayout(attributedString: attributedString, maximumNumberOfLines: maximumNumberOfLines, truncationType: truncationType, backgroundColor: backgroundColor, constrainedSize: constrainedSize, cutout: cutout) + updated = true + } + + let node = maybeNode ?? TextNode() + + return (layout, { + node.cachedLayout = layout + if updated { + node.setNeedsDisplay() + } + + return node + }) + } + } +} diff --git a/TelegramUI/TouchDownGestureRecognizer.swift b/TelegramUI/TouchDownGestureRecognizer.swift new file mode 100644 index 0000000000..aa425c6aef --- /dev/null +++ b/TelegramUI/TouchDownGestureRecognizer.swift @@ -0,0 +1,78 @@ +import Foundation +import UIKit.UIGestureRecognizerSubclass + +private class TouchDownGestureRecognizerTimerTarget: NSObject { + weak var target: TouchDownGestureRecognizer? 
+ + init(target: TouchDownGestureRecognizer) { + self.target = target + + super.init() + } + + @objc func event() { + self.target?.timerEvent() + } +} + +class TouchDownGestureRecognizer: UIGestureRecognizer, UIGestureRecognizerDelegate { + private var touchLocation = CGPoint() + private var timer: Foundation.Timer? + + override init(target: Any?, action: Selector?) { + super.init(target: target, action: action) + + self.delegate = self + } + + func gestureRecognizer(_ gestureRecognizer: UIGestureRecognizer, shouldRecognizeSimultaneouslyWith otherGestureRecognizer: UIGestureRecognizer) -> Bool { + if otherGestureRecognizer is UIPanGestureRecognizer { + return true + } + return false + } + + override func reset() { + self.timer?.invalidate() + self.timer = nil + + super.reset() + } + + func timerEvent() { + self.state = .began + } + + override func touchesBegan(_ touches: Set, with event: UIEvent) { + super.touchesBegan(touches, with: event) + + if let touch = touches.first { + self.touchLocation = touch.location(in: self.view) + } + + self.timer?.invalidate() + self.timer = Timer(timeInterval: 0.08, target: TouchDownGestureRecognizerTimerTarget(target: self), selector: #selector(TouchDownGestureRecognizerTimerTarget.event), userInfo: nil, repeats: false) + + if let timer = self.timer { + RunLoop.main.add(timer, forMode: RunLoopMode.commonModes) + } + } + + override func touchesMoved(_ touches: Set, with event: UIEvent) { + super.touchesMoved(touches, with: event) + + if let touch = touches.first { + let location = touch.location(in: self.view) + let distance = CGPoint(x: location.x - self.touchLocation.x, y: location.y - self.touchLocation.y) + if distance.x * distance.x + distance.y * distance.y > 4.0 { + self.state = .cancelled + } + } + } + + override func touchesEnded(_ touches: Set, with event: UIEvent) { + super.touchesEnded(touches, with: event) + + self.state = .ended + } +} diff --git a/TelegramUI/TransformImageNode.swift 
b/TelegramUI/TransformImageNode.swift new file mode 100644 index 0000000000..f847c72451 --- /dev/null +++ b/TelegramUI/TransformImageNode.swift @@ -0,0 +1,90 @@ +import Foundation +import AsyncDisplayKit +import SwiftSignalKit +import Display +import TelegramCore + +public struct TransformImageArguments: Equatable { + public let corners: ImageCorners + + public let imageSize: CGSize + public let boundingSize: CGSize + public let intrinsicInsets: UIEdgeInsets + + public var drawingSize: CGSize { + let cornersExtendedEdges = self.corners.extendedEdges + return CGSize(width: self.boundingSize.width + cornersExtendedEdges.left + cornersExtendedEdges.right + self.intrinsicInsets.left + self.intrinsicInsets.right, height: self.boundingSize.height + cornersExtendedEdges.top + cornersExtendedEdges.bottom + self.intrinsicInsets.top + self.intrinsicInsets.bottom) + } + + public var drawingRect: CGRect { + let cornersExtendedEdges = self.corners.extendedEdges + return CGRect(x: cornersExtendedEdges.left + self.intrinsicInsets.left, y: cornersExtendedEdges.top + self.intrinsicInsets.top, width: self.boundingSize.width, height: self.boundingSize.height); + } + + public var insets: UIEdgeInsets { + let cornersExtendedEdges = self.corners.extendedEdges + return UIEdgeInsets(top: cornersExtendedEdges.top + self.intrinsicInsets.top, left: cornersExtendedEdges.left + self.intrinsicInsets.left, bottom: cornersExtendedEdges.bottom + self.intrinsicInsets.bottom, right: cornersExtendedEdges.right + self.intrinsicInsets.right) + } +} + +public func ==(lhs: TransformImageArguments, rhs: TransformImageArguments) -> Bool { + return lhs.imageSize == rhs.imageSize && lhs.boundingSize == rhs.boundingSize && lhs.corners == rhs.corners +} + +public class TransformImageNode: ASDisplayNode { + public var imageUpdated: (() -> Void)? 
+ public var alphaTransitionOnFirstUpdate = false + private var disposable = MetaDisposable() + + private var argumentsPromise = Promise() + + deinit { + self.disposable.dispose() + } + + func setSignal(account: Account, signal: Signal<(TransformImageArguments) -> DrawingContext, NoError>, dispatchOnDisplayLink: Bool = true) { + let argumentsPromise = self.argumentsPromise + + let result = combineLatest(signal, argumentsPromise.get()) |> deliverOn(account.graphicsThreadPool) |> mapToThrottled { transform, arguments -> Signal in + return deferred { + return Signal.single(transform(arguments).generateImage()) + } + } + + self.disposable.set((result |> deliverOnMainQueue).start(next: {[weak self] next in + if dispatchOnDisplayLink { + displayLinkDispatcher.dispatch { [weak self] in + if let strongSelf = self { + if strongSelf.alphaTransitionOnFirstUpdate && strongSelf.contents == nil { + strongSelf.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.15) + } + strongSelf.contents = next?.cgImage + if let imageUpdated = strongSelf.imageUpdated { + imageUpdated() + } + } + } + } else { + if let strongSelf = self { + if strongSelf.alphaTransitionOnFirstUpdate && strongSelf.contents == nil { + strongSelf.layer.animateAlpha(from: 0.0, to: 1.0, duration: 0.15) + } + strongSelf.contents = next?.cgImage + if let imageUpdated = strongSelf.imageUpdated { + imageUpdated() + } + } + } + })) + } + + public func asyncLayout() -> (TransformImageArguments) -> (() -> Void) { + return { arguments in + self.argumentsPromise.set(.single(arguments)) + + return { + + } + } + } +} diff --git a/TelegramUI/UIImage+WebP.h b/TelegramUI/UIImage+WebP.h new file mode 100644 index 0000000000..56e92783ca --- /dev/null +++ b/TelegramUI/UIImage+WebP.h @@ -0,0 +1,7 @@ +#import + +@interface UIImage (WebP) + ++ (UIImage *)convertFromWebP:(NSData *)data; + +@end diff --git a/TelegramUI/UIImage+WebP.m b/TelegramUI/UIImage+WebP.m new file mode 100644 index 0000000000..c8a671e25d --- /dev/null +++ 
b/TelegramUI/UIImage+WebP.m @@ -0,0 +1,78 @@ +#import "UIImage+WebP.h" + +#import "../third-party/libwebp/include/webp/decode.h" +#import "../third-party/libwebp/include/webp/encode.h" + +@implementation UIImage (WebP) + ++ (UIImage *)convertFromWebP:(NSData *)imgData { + if (imgData == nil) { + return nil; + } + + // `WebPGetInfo` weill return image width and height + int width = 0, height = 0; + if(!WebPGetInfo([imgData bytes], [imgData length], &width, &height)) { + NSMutableDictionary *errorDetail = [NSMutableDictionary dictionary]; + [errorDetail setValue:@"Header formatting error." forKey:NSLocalizedDescriptionKey]; + return nil; + } + + const struct { int width, height; } targetContextSize = { width, height}; + + size_t targetBytesPerRow = ((4 * (int)targetContextSize.width) + 15) & (~15); + + void *targetMemory = malloc((int)(targetBytesPerRow * targetContextSize.height)); + + CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB(); + CGBitmapInfo bitmapInfo = kCGImageAlphaPremultipliedFirst | kCGBitmapByteOrder32Host; + + CGContextRef targetContext = CGBitmapContextCreate(targetMemory, (int)targetContextSize.width, (int)targetContextSize.height, 8, targetBytesPerRow, colorSpace, bitmapInfo); + + UIGraphicsPushContext(targetContext); + + CGColorSpaceRelease(colorSpace); + + if (WebPDecodeBGRAInto(imgData.bytes, imgData.length, targetMemory, targetBytesPerRow * targetContextSize.height, (int)targetBytesPerRow) == NULL) + { + //[BridgingTrace objc_trace:@"WebP" what:@"error decoding webp"]; + return nil; + } + + for (int y = 0; y < targetContextSize.height; y++) + { + for (int x = 0; x < targetContextSize.width; x++) + { + uint32_t *color = ((uint32_t *)&targetMemory[y * targetBytesPerRow + x * 4]); + + uint32_t a = (*color >> 24) & 0xff; + uint32_t r = ((*color >> 16) & 0xff) * a; + uint32_t g = ((*color >> 8) & 0xff) * a; + uint32_t b = (*color & 0xff) * a; + + r = (r + 1 + (r >> 8)) >> 8; + g = (g + 1 + (g >> 8)) >> 8; + b = (b + 1 + (b >> 8)) >> 8; + + 
*color = (a << 24) | (r << 16) | (g << 8) | b; + } + + for (size_t i = y * targetBytesPerRow + targetContextSize.width * 4; i < (targetBytesPerRow >> 2); i++) + { + *((uint32_t *)&targetMemory[i]) = 0; + } + } + + UIGraphicsPopContext(); + + CGImageRef bitmapImage = CGBitmapContextCreateImage(targetContext); + UIImage *image = [[UIImage alloc] initWithCGImage:bitmapImage scale:1.0f orientation:UIImageOrientationUp]; + CGImageRelease(bitmapImage); + + CGContextRelease(targetContext); + free(targetMemory); + + return image; +} + +@end diff --git a/TelegramUI/WebP.swift b/TelegramUI/WebP.swift new file mode 100644 index 0000000000..9a30829567 --- /dev/null +++ b/TelegramUI/WebP.swift @@ -0,0 +1,3 @@ +import Foundation + + diff --git a/TelegramUI/ZoomableContentGalleryItemNode.swift b/TelegramUI/ZoomableContentGalleryItemNode.swift new file mode 100644 index 0000000000..f7de6113b9 --- /dev/null +++ b/TelegramUI/ZoomableContentGalleryItemNode.swift @@ -0,0 +1,143 @@ +import Foundation +import Display +import AsyncDisplayKit + +class ZoomableContentGalleryItemNode: GalleryItemNode, UIScrollViewDelegate { + let scrollView: UIScrollView + + private var containerLayout: ContainerViewLayout? + + var zoomableContent: (CGSize, ASDisplayNode)? 
{ + didSet { + if oldValue?.1 !== self.zoomableContent?.1 { + if let node = oldValue?.1 { + node.view.removeFromSuperview() + } + } + if let node = self.zoomableContent?.1 { + self.scrollView.addSubview(node.view) + } + self.resetScrollViewContents() + } + } + + override init() { + self.scrollView = UIScrollView() + + super.init() + + self.scrollView.delegate = self + self.scrollView.showsVerticalScrollIndicator = false + self.scrollView.showsHorizontalScrollIndicator = false + self.scrollView.clipsToBounds = false + self.scrollView.scrollsToTop = false + self.scrollView.delaysContentTouches = false + + let tapRecognizer = UITapGestureRecognizer(target: self, action: #selector(self.contentTap(_:))) + + self.scrollView.addGestureRecognizer(tapRecognizer) + + self.view.addSubview(self.scrollView) + } + + @objc func contentTap(_ recognizer: UITapGestureRecognizer) { + if recognizer.state == .ended { + self.toggleControlsVisibility() + } + } + + override func containerLayoutUpdated(_ layout: ContainerViewLayout, navigationBarHeight: CGFloat, transition: ContainedViewLayoutTransition) { + super.containerLayoutUpdated(layout, navigationBarHeight: navigationBarHeight, transition: transition) + + var shouldResetContents = false + if let containerLayout = self.containerLayout { + shouldResetContents = !containerLayout.size.equalTo(layout.size) + } else { + shouldResetContents = true + } + self.containerLayout = layout + + if shouldResetContents { + self.scrollView.frame = CGRect(origin: CGPoint(), size: layout.size) + self.resetScrollViewContents() + } + } + + private func resetScrollViewContents() { + guard let (contentSize, contentNode) = self.zoomableContent else { + return + } + + self.scrollView.minimumZoomScale = 1.0 + self.scrollView.maximumZoomScale = 1.0 + //self.scrollView.normalZoomScale = 1.0 + self.scrollView.zoomScale = 1.0 + self.scrollView.contentSize = contentSize + + contentNode.transform = CATransform3DIdentity + contentNode.frame = CGRect(origin: 
CGPoint(), size: contentSize) + + self.centerScrollViewContents() + + self.scrollView.zoomScale = self.scrollView.minimumZoomScale + } + + private func centerScrollViewContents() { + guard let (contentSize, contentNode) = self.zoomableContent else { + return + } + + let boundsSize = self.scrollView.bounds.size + if contentSize.width.isLessThanOrEqualTo(0.0) || contentSize.height.isLessThanOrEqualTo(0.0) || boundsSize.width.isLessThanOrEqualTo(0.0) || boundsSize.height.isLessThanOrEqualTo(0.0) { + return + } + + let scaleWidth = boundsSize.width / contentSize.width + let scaleHeight = boundsSize.height / contentSize.height + let minScale = min(scaleWidth, scaleHeight) + var maxScale = max(scaleWidth, scaleHeight) + maxScale = max(maxScale, minScale * 3.0) + + if (abs(maxScale - minScale) < 0.01) { + maxScale = minScale + } + + if !self.scrollView.minimumZoomScale.isEqual(to: minScale) { + self.scrollView.minimumZoomScale = minScale + } + + /*if !self.scrollView.normalZoomScale.isEqual(to: minScale) { + self.scrollView.normalZoomScale = minScale + }*/ + + if !self.scrollView.maximumZoomScale.isEqual(to: maxScale) { + self.scrollView.maximumZoomScale = maxScale + } + + var contentFrame = contentNode.view.frame + + if boundsSize.width > contentFrame.size.width { + contentFrame.origin.x = (boundsSize.width - contentFrame.size.width) / 2.0 + } else { + contentFrame.origin.x = 0.0 + } + + if boundsSize.height > contentFrame.size.height { + contentFrame.origin.y = (boundsSize.height - contentFrame.size.height) / 2.0 + } else { + contentFrame.origin.y = 0.0 + } + + contentNode.view.frame = contentFrame + + //self.scrollView.scrollEnabled = ABS(_scrollView.zoomScale - _scrollView.normalZoomScale) > FLT_EPSILON; + } + + func viewForZooming(in scrollView: UIScrollView) -> UIView? 
{ + return self.zoomableContent?.1.view + } + + func scrollViewDidZoom(_ scrollView: UIScrollView) { + self.centerScrollViewContents() + } +} diff --git a/TelegramUI/module.private.modulemap b/TelegramUI/module.private.modulemap new file mode 100644 index 0000000000..0c4e7fe618 --- /dev/null +++ b/TelegramUI/module.private.modulemap @@ -0,0 +1,3 @@ +module TelegramUIPrivate { + export * +} diff --git a/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h b/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h new file mode 100644 index 0000000000..6385220252 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/avcodec.h @@ -0,0 +1,6100 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVCODEC_H +#define AVCODEC_AVCODEC_H + +/** + * @file + * @ingroup libavc + * Libavcodec external API header + */ + +#include +#include "../libavutil/samplefmt.h" +#include "../libavutil/attributes.h" +#include "../libavutil/avutil.h" +#include "../libavutil/buffer.h" +#include "../libavutil/cpu.h" +#include "../libavutil/channel_layout.h" +#include "../libavutil/dict.h" +#include "../libavutil/frame.h" +#include "../libavutil/log.h" +#include "../libavutil/pixfmt.h" +#include "../libavutil/rational.h" + +#include "version.h" + +/** + * @defgroup libavc Encoding/Decoding Library + * @{ + * + * @defgroup lavc_decoding Decoding + * @{ + * @} + * + * @defgroup lavc_encoding Encoding + * @{ + * @} + * + * @defgroup lavc_codec Codecs + * @{ + * @defgroup lavc_codec_native Native Codecs + * @{ + * @} + * @defgroup lavc_codec_wrappers External library wrappers + * @{ + * @} + * @defgroup lavc_codec_hwaccel Hardware Accelerators bridge + * @{ + * @} + * @} + * @defgroup lavc_internal Internal + * @{ + * @} + * @} + */ + +/** + * @ingroup libavc + * @defgroup lavc_encdec send/receive encoding and decoding API overview + * @{ + * + * The avcodec_send_packet()/avcodec_receive_frame()/avcodec_send_frame()/ + * avcodec_receive_packet() functions provide an encode/decode API, which + * decouples input and output. + * + * The API is very similar for encoding/decoding and audio/video, and works as + * follows: + * - Set up and open the AVCodecContext as usual. + * - Send valid input: + * - For decoding, call avcodec_send_packet() to give the decoder raw + * compressed data in an AVPacket. + * - For encoding, call avcodec_send_frame() to give the decoder an AVFrame + * containing uncompressed audio or video. 
+ * In both cases, it is recommended that AVPackets and AVFrames are + * refcounted, or libavcodec might have to copy the input data. (libavformat + * always returns refcounted AVPackets, and av_frame_get_buffer() allocates + * refcounted AVFrames.) + * - Receive output in a loop. Periodically call one of the avcodec_receive_*() + * functions and process their output: + * - For decoding, call avcodec_receive_frame(). On success, it will return + * an AVFrame containing uncompressed audio or video data. + * - For encoding, call avcodec_receive_packet(). On success, it will return + * an AVPacket with a compressed frame. + * Repeat this call until it returns AVERROR(EAGAIN) or an error. The + * AVERROR(EAGAIN) return value means that new input data is required to + * return new output. In this case, continue with sending input. For each + * input frame/packet, the codec will typically return 1 output frame/packet, + * but it can also be 0 or more than 1. + * + * At the beginning of decoding or encoding, the codec might accept multiple + * input frames/packets without returning a frame, until its internal buffers + * are filled. This situation is handled transparently if you follow the steps + * outlined above. + * + * End of stream situations. These require "flushing" (aka draining) the codec, + * as the codec might buffer multiple frames or packets internally for + * performance or out of necessity (consider B-frames). + * This is handled as follows: + * - Instead of valid input, send NULL to the avcodec_send_packet() (decoding) + * or avcodec_send_frame() (encoding) functions. This will enter draining + * mode. + * - Call avcodec_receive_frame() (decoding) or avcodec_receive_packet() + * (encoding) in a loop until AVERROR_EOF is returned. The functions will + * not return AVERROR(EAGAIN), unless you forgot to enter draining mode. + * - Before decoding can be resumed again, the codec has to be reset with + * avcodec_flush_buffers(). 
+ * + * Using the API as outlined above is highly recommended. But it is also + * possible to call functions outside of this rigid schema. For example, you can + * call avcodec_send_packet() repeatedly without calling + * avcodec_receive_frame(). In this case, avcodec_send_packet() will succeed + * until the codec's internal buffer has been filled up (which is typically of + * size 1 per output frame, after initial input), and then reject input with + * AVERROR(EAGAIN). Once it starts rejecting input, you have no choice but to + * read at least some output. + * + * Not all codecs will follow a rigid and predictable dataflow; the only + * guarantee is that an AVERROR(EAGAIN) return value on a send/receive call on + * one end implies that a receive/send call on the other end will succeed. In + * general, no codec will permit unlimited buffering of input or output. + * + * This API replaces the following legacy functions: + * - avcodec_decode_video2() and avcodec_decode_audio4(): + * Use avcodec_send_packet() to feed input to the decoder, then use + * avcodec_receive_frame() to receive decoded frames after each packet. + * Unlike with the old video decoding API, multiple frames might result from + * a packet. For audio, splitting the input packet into frames by partially + * decoding packets becomes transparent to the API user. You never need to + * feed an AVPacket to the API twice. + * Additionally, sending a flush/draining packet is required only once. + * - avcodec_encode_video2()/avcodec_encode_audio2(): + * Use avcodec_send_frame() to feed input to the encoder, then use + * avcodec_receive_packet() to receive encoded packets. + * Providing user-allocated buffers for avcodec_receive_packet() is not + * possible. + * - The new API does not handle subtitles yet. + * + * Mixing new and old function calls on the same AVCodecContext is not allowed, + * and will result in undefined behavior. 
+ * + * Some codecs might require using the new API; using the old API will return + * an error when calling it. + * @} + */ + +/** + * @defgroup lavc_core Core functions/structures. + * @ingroup libavc + * + * Basic definitions, functions for querying libavcodec capabilities, + * allocating core structures, etc. + * @{ + */ + + +/** + * Identify the syntax and semantics of the bitstream. + * The principle is roughly: + * Two decoders with the same ID can decode the same streams. + * Two encoders with the same ID can encode compatible streams. + * There may be slight deviations from the principle due to implementation + * details. + * + * If you add a codec ID to this list, add it so that + * 1. no value of an existing codec ID changes (that would break ABI), + * 2. it is as close as possible to similar codecs + * + * After adding new codec IDs, do not forget to add an entry to the codec + * descriptor list and bump libavcodec minor version. + */ +enum AVCodecID { + AV_CODEC_ID_NONE, + + /* video codecs */ + AV_CODEC_ID_MPEG1VIDEO, + AV_CODEC_ID_MPEG2VIDEO, ///< preferred ID for MPEG-1/2 video decoding +#if FF_API_XVMC + AV_CODEC_ID_MPEG2VIDEO_XVMC, +#endif /* FF_API_XVMC */ + AV_CODEC_ID_H261, + AV_CODEC_ID_H263, + AV_CODEC_ID_RV10, + AV_CODEC_ID_RV20, + AV_CODEC_ID_MJPEG, + AV_CODEC_ID_MJPEGB, + AV_CODEC_ID_LJPEG, + AV_CODEC_ID_SP5X, + AV_CODEC_ID_JPEGLS, + AV_CODEC_ID_MPEG4, + AV_CODEC_ID_RAWVIDEO, + AV_CODEC_ID_MSMPEG4V1, + AV_CODEC_ID_MSMPEG4V2, + AV_CODEC_ID_MSMPEG4V3, + AV_CODEC_ID_WMV1, + AV_CODEC_ID_WMV2, + AV_CODEC_ID_H263P, + AV_CODEC_ID_H263I, + AV_CODEC_ID_FLV1, + AV_CODEC_ID_SVQ1, + AV_CODEC_ID_SVQ3, + AV_CODEC_ID_DVVIDEO, + AV_CODEC_ID_HUFFYUV, + AV_CODEC_ID_CYUV, + AV_CODEC_ID_H264, + AV_CODEC_ID_INDEO3, + AV_CODEC_ID_VP3, + AV_CODEC_ID_THEORA, + AV_CODEC_ID_ASV1, + AV_CODEC_ID_ASV2, + AV_CODEC_ID_FFV1, + AV_CODEC_ID_4XM, + AV_CODEC_ID_VCR1, + AV_CODEC_ID_CLJR, + AV_CODEC_ID_MDEC, + AV_CODEC_ID_ROQ, + AV_CODEC_ID_INTERPLAY_VIDEO, + 
AV_CODEC_ID_XAN_WC3, + AV_CODEC_ID_XAN_WC4, + AV_CODEC_ID_RPZA, + AV_CODEC_ID_CINEPAK, + AV_CODEC_ID_WS_VQA, + AV_CODEC_ID_MSRLE, + AV_CODEC_ID_MSVIDEO1, + AV_CODEC_ID_IDCIN, + AV_CODEC_ID_8BPS, + AV_CODEC_ID_SMC, + AV_CODEC_ID_FLIC, + AV_CODEC_ID_TRUEMOTION1, + AV_CODEC_ID_VMDVIDEO, + AV_CODEC_ID_MSZH, + AV_CODEC_ID_ZLIB, + AV_CODEC_ID_QTRLE, + AV_CODEC_ID_TSCC, + AV_CODEC_ID_ULTI, + AV_CODEC_ID_QDRAW, + AV_CODEC_ID_VIXL, + AV_CODEC_ID_QPEG, + AV_CODEC_ID_PNG, + AV_CODEC_ID_PPM, + AV_CODEC_ID_PBM, + AV_CODEC_ID_PGM, + AV_CODEC_ID_PGMYUV, + AV_CODEC_ID_PAM, + AV_CODEC_ID_FFVHUFF, + AV_CODEC_ID_RV30, + AV_CODEC_ID_RV40, + AV_CODEC_ID_VC1, + AV_CODEC_ID_WMV3, + AV_CODEC_ID_LOCO, + AV_CODEC_ID_WNV1, + AV_CODEC_ID_AASC, + AV_CODEC_ID_INDEO2, + AV_CODEC_ID_FRAPS, + AV_CODEC_ID_TRUEMOTION2, + AV_CODEC_ID_BMP, + AV_CODEC_ID_CSCD, + AV_CODEC_ID_MMVIDEO, + AV_CODEC_ID_ZMBV, + AV_CODEC_ID_AVS, + AV_CODEC_ID_SMACKVIDEO, + AV_CODEC_ID_NUV, + AV_CODEC_ID_KMVC, + AV_CODEC_ID_FLASHSV, + AV_CODEC_ID_CAVS, + AV_CODEC_ID_JPEG2000, + AV_CODEC_ID_VMNC, + AV_CODEC_ID_VP5, + AV_CODEC_ID_VP6, + AV_CODEC_ID_VP6F, + AV_CODEC_ID_TARGA, + AV_CODEC_ID_DSICINVIDEO, + AV_CODEC_ID_TIERTEXSEQVIDEO, + AV_CODEC_ID_TIFF, + AV_CODEC_ID_GIF, + AV_CODEC_ID_DXA, + AV_CODEC_ID_DNXHD, + AV_CODEC_ID_THP, + AV_CODEC_ID_SGI, + AV_CODEC_ID_C93, + AV_CODEC_ID_BETHSOFTVID, + AV_CODEC_ID_PTX, + AV_CODEC_ID_TXD, + AV_CODEC_ID_VP6A, + AV_CODEC_ID_AMV, + AV_CODEC_ID_VB, + AV_CODEC_ID_PCX, + AV_CODEC_ID_SUNRAST, + AV_CODEC_ID_INDEO4, + AV_CODEC_ID_INDEO5, + AV_CODEC_ID_MIMIC, + AV_CODEC_ID_RL2, + AV_CODEC_ID_ESCAPE124, + AV_CODEC_ID_DIRAC, + AV_CODEC_ID_BFI, + AV_CODEC_ID_CMV, + AV_CODEC_ID_MOTIONPIXELS, + AV_CODEC_ID_TGV, + AV_CODEC_ID_TGQ, + AV_CODEC_ID_TQI, + AV_CODEC_ID_AURA, + AV_CODEC_ID_AURA2, + AV_CODEC_ID_V210X, + AV_CODEC_ID_TMV, + AV_CODEC_ID_V210, + AV_CODEC_ID_DPX, + AV_CODEC_ID_MAD, + AV_CODEC_ID_FRWU, + AV_CODEC_ID_FLASHSV2, + AV_CODEC_ID_CDGRAPHICS, + AV_CODEC_ID_R210, + AV_CODEC_ID_ANM, + 
AV_CODEC_ID_BINKVIDEO, + AV_CODEC_ID_IFF_ILBM, +#define AV_CODEC_ID_IFF_BYTERUN1 AV_CODEC_ID_IFF_ILBM + AV_CODEC_ID_KGV1, + AV_CODEC_ID_YOP, + AV_CODEC_ID_VP8, + AV_CODEC_ID_PICTOR, + AV_CODEC_ID_ANSI, + AV_CODEC_ID_A64_MULTI, + AV_CODEC_ID_A64_MULTI5, + AV_CODEC_ID_R10K, + AV_CODEC_ID_MXPEG, + AV_CODEC_ID_LAGARITH, + AV_CODEC_ID_PRORES, + AV_CODEC_ID_JV, + AV_CODEC_ID_DFA, + AV_CODEC_ID_WMV3IMAGE, + AV_CODEC_ID_VC1IMAGE, + AV_CODEC_ID_UTVIDEO, + AV_CODEC_ID_BMV_VIDEO, + AV_CODEC_ID_VBLE, + AV_CODEC_ID_DXTORY, + AV_CODEC_ID_V410, + AV_CODEC_ID_XWD, + AV_CODEC_ID_CDXL, + AV_CODEC_ID_XBM, + AV_CODEC_ID_ZEROCODEC, + AV_CODEC_ID_MSS1, + AV_CODEC_ID_MSA1, + AV_CODEC_ID_TSCC2, + AV_CODEC_ID_MTS2, + AV_CODEC_ID_CLLC, + AV_CODEC_ID_MSS2, + AV_CODEC_ID_VP9, + AV_CODEC_ID_AIC, + AV_CODEC_ID_ESCAPE130, + AV_CODEC_ID_G2M, + AV_CODEC_ID_WEBP, + AV_CODEC_ID_HNM4_VIDEO, + AV_CODEC_ID_HEVC, +#define AV_CODEC_ID_H265 AV_CODEC_ID_HEVC + AV_CODEC_ID_FIC, + AV_CODEC_ID_ALIAS_PIX, + AV_CODEC_ID_BRENDER_PIX, + AV_CODEC_ID_PAF_VIDEO, + AV_CODEC_ID_EXR, + AV_CODEC_ID_VP7, + AV_CODEC_ID_SANM, + AV_CODEC_ID_SGIRLE, + AV_CODEC_ID_MVC1, + AV_CODEC_ID_MVC2, + AV_CODEC_ID_HQX, + AV_CODEC_ID_TDSC, + AV_CODEC_ID_HQ_HQA, + AV_CODEC_ID_HAP, + AV_CODEC_ID_DDS, + AV_CODEC_ID_DXV, + AV_CODEC_ID_SCREENPRESSO, + AV_CODEC_ID_RSCC, + + AV_CODEC_ID_Y41P = 0x8000, + AV_CODEC_ID_AVRP, + AV_CODEC_ID_012V, + AV_CODEC_ID_AVUI, + AV_CODEC_ID_AYUV, + AV_CODEC_ID_TARGA_Y216, + AV_CODEC_ID_V308, + AV_CODEC_ID_V408, + AV_CODEC_ID_YUV4, + AV_CODEC_ID_AVRN, + AV_CODEC_ID_CPIA, + AV_CODEC_ID_XFACE, + AV_CODEC_ID_SNOW, + AV_CODEC_ID_SMVJPEG, + AV_CODEC_ID_APNG, + AV_CODEC_ID_DAALA, + AV_CODEC_ID_CFHD, + AV_CODEC_ID_TRUEMOTION2RT, + AV_CODEC_ID_M101, + AV_CODEC_ID_MAGICYUV, + AV_CODEC_ID_SHEERVIDEO, + AV_CODEC_ID_YLC, + + /* various PCM "codecs" */ + AV_CODEC_ID_FIRST_AUDIO = 0x10000, ///< A dummy id pointing at the start of audio codecs + AV_CODEC_ID_PCM_S16LE = 0x10000, + AV_CODEC_ID_PCM_S16BE, + AV_CODEC_ID_PCM_U16LE, 
+ AV_CODEC_ID_PCM_U16BE, + AV_CODEC_ID_PCM_S8, + AV_CODEC_ID_PCM_U8, + AV_CODEC_ID_PCM_MULAW, + AV_CODEC_ID_PCM_ALAW, + AV_CODEC_ID_PCM_S32LE, + AV_CODEC_ID_PCM_S32BE, + AV_CODEC_ID_PCM_U32LE, + AV_CODEC_ID_PCM_U32BE, + AV_CODEC_ID_PCM_S24LE, + AV_CODEC_ID_PCM_S24BE, + AV_CODEC_ID_PCM_U24LE, + AV_CODEC_ID_PCM_U24BE, + AV_CODEC_ID_PCM_S24DAUD, + AV_CODEC_ID_PCM_ZORK, + AV_CODEC_ID_PCM_S16LE_PLANAR, + AV_CODEC_ID_PCM_DVD, + AV_CODEC_ID_PCM_F32BE, + AV_CODEC_ID_PCM_F32LE, + AV_CODEC_ID_PCM_F64BE, + AV_CODEC_ID_PCM_F64LE, + AV_CODEC_ID_PCM_BLURAY, + AV_CODEC_ID_PCM_LXF, + AV_CODEC_ID_S302M, + AV_CODEC_ID_PCM_S8_PLANAR, + AV_CODEC_ID_PCM_S24LE_PLANAR, + AV_CODEC_ID_PCM_S32LE_PLANAR, + AV_CODEC_ID_PCM_S16BE_PLANAR, + /* new PCM "codecs" should be added right below this line starting with + * an explicit value of for example 0x10800 + */ + + /* various ADPCM codecs */ + AV_CODEC_ID_ADPCM_IMA_QT = 0x11000, + AV_CODEC_ID_ADPCM_IMA_WAV, + AV_CODEC_ID_ADPCM_IMA_DK3, + AV_CODEC_ID_ADPCM_IMA_DK4, + AV_CODEC_ID_ADPCM_IMA_WS, + AV_CODEC_ID_ADPCM_IMA_SMJPEG, + AV_CODEC_ID_ADPCM_MS, + AV_CODEC_ID_ADPCM_4XM, + AV_CODEC_ID_ADPCM_XA, + AV_CODEC_ID_ADPCM_ADX, + AV_CODEC_ID_ADPCM_EA, + AV_CODEC_ID_ADPCM_G726, + AV_CODEC_ID_ADPCM_CT, + AV_CODEC_ID_ADPCM_SWF, + AV_CODEC_ID_ADPCM_YAMAHA, + AV_CODEC_ID_ADPCM_SBPRO_4, + AV_CODEC_ID_ADPCM_SBPRO_3, + AV_CODEC_ID_ADPCM_SBPRO_2, + AV_CODEC_ID_ADPCM_THP, + AV_CODEC_ID_ADPCM_IMA_AMV, + AV_CODEC_ID_ADPCM_EA_R1, + AV_CODEC_ID_ADPCM_EA_R3, + AV_CODEC_ID_ADPCM_EA_R2, + AV_CODEC_ID_ADPCM_IMA_EA_SEAD, + AV_CODEC_ID_ADPCM_IMA_EA_EACS, + AV_CODEC_ID_ADPCM_EA_XAS, + AV_CODEC_ID_ADPCM_EA_MAXIS_XA, + AV_CODEC_ID_ADPCM_IMA_ISS, + AV_CODEC_ID_ADPCM_G722, + AV_CODEC_ID_ADPCM_IMA_APC, + AV_CODEC_ID_ADPCM_VIMA, +#if FF_API_VIMA_DECODER + AV_CODEC_ID_VIMA = AV_CODEC_ID_ADPCM_VIMA, +#endif + + AV_CODEC_ID_ADPCM_AFC = 0x11800, + AV_CODEC_ID_ADPCM_IMA_OKI, + AV_CODEC_ID_ADPCM_DTK, + AV_CODEC_ID_ADPCM_IMA_RAD, + AV_CODEC_ID_ADPCM_G726LE, + AV_CODEC_ID_ADPCM_THP_LE, + 
AV_CODEC_ID_ADPCM_PSX, + AV_CODEC_ID_ADPCM_AICA, + AV_CODEC_ID_ADPCM_IMA_DAT4, + AV_CODEC_ID_ADPCM_MTAF, + + /* AMR */ + AV_CODEC_ID_AMR_NB = 0x12000, + AV_CODEC_ID_AMR_WB, + + /* RealAudio codecs*/ + AV_CODEC_ID_RA_144 = 0x13000, + AV_CODEC_ID_RA_288, + + /* various DPCM codecs */ + AV_CODEC_ID_ROQ_DPCM = 0x14000, + AV_CODEC_ID_INTERPLAY_DPCM, + AV_CODEC_ID_XAN_DPCM, + AV_CODEC_ID_SOL_DPCM, + + AV_CODEC_ID_SDX2_DPCM = 0x14800, + + /* audio codecs */ + AV_CODEC_ID_MP2 = 0x15000, + AV_CODEC_ID_MP3, ///< preferred ID for decoding MPEG audio layer 1, 2 or 3 + AV_CODEC_ID_AAC, + AV_CODEC_ID_AC3, + AV_CODEC_ID_DTS, + AV_CODEC_ID_VORBIS, + AV_CODEC_ID_DVAUDIO, + AV_CODEC_ID_WMAV1, + AV_CODEC_ID_WMAV2, + AV_CODEC_ID_MACE3, + AV_CODEC_ID_MACE6, + AV_CODEC_ID_VMDAUDIO, + AV_CODEC_ID_FLAC, + AV_CODEC_ID_MP3ADU, + AV_CODEC_ID_MP3ON4, + AV_CODEC_ID_SHORTEN, + AV_CODEC_ID_ALAC, + AV_CODEC_ID_WESTWOOD_SND1, + AV_CODEC_ID_GSM, ///< as in Berlin toast format + AV_CODEC_ID_QDM2, + AV_CODEC_ID_COOK, + AV_CODEC_ID_TRUESPEECH, + AV_CODEC_ID_TTA, + AV_CODEC_ID_SMACKAUDIO, + AV_CODEC_ID_QCELP, + AV_CODEC_ID_WAVPACK, + AV_CODEC_ID_DSICINAUDIO, + AV_CODEC_ID_IMC, + AV_CODEC_ID_MUSEPACK7, + AV_CODEC_ID_MLP, + AV_CODEC_ID_GSM_MS, /* as found in WAV */ + AV_CODEC_ID_ATRAC3, +#if FF_API_VOXWARE + AV_CODEC_ID_VOXWARE, +#endif + AV_CODEC_ID_APE, + AV_CODEC_ID_NELLYMOSER, + AV_CODEC_ID_MUSEPACK8, + AV_CODEC_ID_SPEEX, + AV_CODEC_ID_WMAVOICE, + AV_CODEC_ID_WMAPRO, + AV_CODEC_ID_WMALOSSLESS, + AV_CODEC_ID_ATRAC3P, + AV_CODEC_ID_EAC3, + AV_CODEC_ID_SIPR, + AV_CODEC_ID_MP1, + AV_CODEC_ID_TWINVQ, + AV_CODEC_ID_TRUEHD, + AV_CODEC_ID_MP4ALS, + AV_CODEC_ID_ATRAC1, + AV_CODEC_ID_BINKAUDIO_RDFT, + AV_CODEC_ID_BINKAUDIO_DCT, + AV_CODEC_ID_AAC_LATM, + AV_CODEC_ID_QDMC, + AV_CODEC_ID_CELT, + AV_CODEC_ID_G723_1, + AV_CODEC_ID_G729, + AV_CODEC_ID_8SVX_EXP, + AV_CODEC_ID_8SVX_FIB, + AV_CODEC_ID_BMV_AUDIO, + AV_CODEC_ID_RALF, + AV_CODEC_ID_IAC, + AV_CODEC_ID_ILBC, + AV_CODEC_ID_OPUS, + AV_CODEC_ID_COMFORT_NOISE, 
+ AV_CODEC_ID_TAK, + AV_CODEC_ID_METASOUND, + AV_CODEC_ID_PAF_AUDIO, + AV_CODEC_ID_ON2AVC, + AV_CODEC_ID_DSS_SP, + + AV_CODEC_ID_FFWAVESYNTH = 0x15800, + AV_CODEC_ID_SONIC, + AV_CODEC_ID_SONIC_LS, + AV_CODEC_ID_EVRC, + AV_CODEC_ID_SMV, + AV_CODEC_ID_DSD_LSBF, + AV_CODEC_ID_DSD_MSBF, + AV_CODEC_ID_DSD_LSBF_PLANAR, + AV_CODEC_ID_DSD_MSBF_PLANAR, + AV_CODEC_ID_4GV, + AV_CODEC_ID_INTERPLAY_ACM, + AV_CODEC_ID_XMA1, + AV_CODEC_ID_XMA2, + AV_CODEC_ID_DST, + + /* subtitle codecs */ + AV_CODEC_ID_FIRST_SUBTITLE = 0x17000, ///< A dummy ID pointing at the start of subtitle codecs. + AV_CODEC_ID_DVD_SUBTITLE = 0x17000, + AV_CODEC_ID_DVB_SUBTITLE, + AV_CODEC_ID_TEXT, ///< raw UTF-8 text + AV_CODEC_ID_XSUB, + AV_CODEC_ID_SSA, + AV_CODEC_ID_MOV_TEXT, + AV_CODEC_ID_HDMV_PGS_SUBTITLE, + AV_CODEC_ID_DVB_TELETEXT, + AV_CODEC_ID_SRT, + + AV_CODEC_ID_MICRODVD = 0x17800, + AV_CODEC_ID_EIA_608, + AV_CODEC_ID_JACOSUB, + AV_CODEC_ID_SAMI, + AV_CODEC_ID_REALTEXT, + AV_CODEC_ID_STL, + AV_CODEC_ID_SUBVIEWER1, + AV_CODEC_ID_SUBVIEWER, + AV_CODEC_ID_SUBRIP, + AV_CODEC_ID_WEBVTT, + AV_CODEC_ID_MPL2, + AV_CODEC_ID_VPLAYER, + AV_CODEC_ID_PJS, + AV_CODEC_ID_ASS, + AV_CODEC_ID_HDMV_TEXT_SUBTITLE, + + /* other specific kind of codecs (generally used for attachments) */ + AV_CODEC_ID_FIRST_UNKNOWN = 0x18000, ///< A dummy ID pointing at the start of various fake codecs. 
+ AV_CODEC_ID_TTF = 0x18000, + + AV_CODEC_ID_BINTEXT = 0x18800, + AV_CODEC_ID_XBIN, + AV_CODEC_ID_IDF, + AV_CODEC_ID_OTF, + AV_CODEC_ID_SMPTE_KLV, + AV_CODEC_ID_DVD_NAV, + AV_CODEC_ID_TIMED_ID3, + AV_CODEC_ID_BIN_DATA, + + + AV_CODEC_ID_PROBE = 0x19000, ///< codec_id is not known (like AV_CODEC_ID_NONE) but lavf should attempt to identify it + + AV_CODEC_ID_MPEG2TS = 0x20000, /**< _FAKE_ codec to indicate a raw MPEG-2 TS + * stream (only used by libavformat) */ + AV_CODEC_ID_MPEG4SYSTEMS = 0x20001, /**< _FAKE_ codec to indicate a MPEG-4 Systems + * stream (only used by libavformat) */ + AV_CODEC_ID_FFMETADATA = 0x21000, ///< Dummy codec for streams containing only metadata information. + AV_CODEC_ID_WRAPPED_AVFRAME = 0x21001, ///< Passthrough codec, AVFrames wrapped in AVPacket +}; + +/** + * This struct describes the properties of a single codec described by an + * AVCodecID. + * @see avcodec_descriptor_get() + */ +typedef struct AVCodecDescriptor { + enum AVCodecID id; + enum AVMediaType type; + /** + * Name of the codec described by this descriptor. It is non-empty and + * unique for each codec descriptor. It should contain alphanumeric + * characters and '_' only. + */ + const char *name; + /** + * A more descriptive name for this codec. May be NULL. + */ + const char *long_name; + /** + * Codec properties, a combination of AV_CODEC_PROP_* flags. + */ + int props; + /** + * MIME type(s) associated with the codec. + * May be NULL; if not, a NULL-terminated array of MIME types. + * The first item is always non-NULL and is the preferred MIME type. + */ + const char *const *mime_types; + /** + * If non-NULL, an array of profiles recognized for this codec. + * Terminated with FF_PROFILE_UNKNOWN. + */ + const struct AVProfile *profiles; +} AVCodecDescriptor; + +/** + * Codec uses only intra compression. + * Video codecs only. + */ +#define AV_CODEC_PROP_INTRA_ONLY (1 << 0) +/** + * Codec supports lossy compression. Audio and video codecs only. 
+ * @note a codec may support both lossy and lossless + * compression modes + */ +#define AV_CODEC_PROP_LOSSY (1 << 1) +/** + * Codec supports lossless compression. Audio and video codecs only. + */ +#define AV_CODEC_PROP_LOSSLESS (1 << 2) +/** + * Codec supports frame reordering. That is, the coded order (the order in which + * the encoded packets are output by the encoders / stored / input to the + * decoders) may be different from the presentation order of the corresponding + * frames. + * + * For codecs that do not have this property set, PTS and DTS should always be + * equal. + */ +#define AV_CODEC_PROP_REORDER (1 << 3) +/** + * Subtitle codec is bitmap based + * Decoded AVSubtitle data can be read from the AVSubtitleRect->pict field. + */ +#define AV_CODEC_PROP_BITMAP_SUB (1 << 16) +/** + * Subtitle codec is text based. + * Decoded AVSubtitle data can be read from the AVSubtitleRect->ass field. + */ +#define AV_CODEC_PROP_TEXT_SUB (1 << 17) + +/** + * @ingroup lavc_decoding + * Required number of additionally allocated bytes at the end of the input bitstream for decoding. + * This is mainly needed because some optimized bitstream readers read + * 32 or 64 bit at once and could read over the end.
+ * Note: If the first 23 bits of the additional bytes are not 0, then damaged + * MPEG bitstreams could cause overread and segfault. + */ +#define AV_INPUT_BUFFER_PADDING_SIZE 32 + +/** + * @ingroup lavc_encoding + * minimum encoding buffer size + * Used to avoid some checks during header writing. + */ +#define AV_INPUT_BUFFER_MIN_SIZE 16384 + +#if FF_API_WITHOUT_PREFIX +/** + * @deprecated use AV_INPUT_BUFFER_PADDING_SIZE instead + */ +#define FF_INPUT_BUFFER_PADDING_SIZE 32 + +/** + * @deprecated use AV_INPUT_BUFFER_MIN_SIZE instead + */ +#define FF_MIN_BUFFER_SIZE 16384 +#endif /* FF_API_WITHOUT_PREFIX */ + +/** + * @ingroup lavc_encoding + * motion estimation type. + * @deprecated use codec private option instead + */ +#if FF_API_MOTION_EST +enum Motion_Est_ID { + ME_ZERO = 1, ///< no search, that is use 0,0 vector whenever one is needed + ME_FULL, + ME_LOG, + ME_PHODS, + ME_EPZS, ///< enhanced predictive zonal search + ME_X1, ///< reserved for experiments + ME_HEX, ///< hexagon based search + ME_UMH, ///< uneven multi-hexagon search + ME_TESA, ///< transformed exhaustive search algorithm + ME_ITER=50, ///< iterative search +}; +#endif + +/** + * @ingroup lavc_decoding + */ +enum AVDiscard{ + /* We leave some space between them for extensions (drop some + * keyframes for intra-only or drop just some bidir frames). 
*/ + AVDISCARD_NONE =-16, ///< discard nothing + AVDISCARD_DEFAULT = 0, ///< discard useless packets like 0 size packets in avi + AVDISCARD_NONREF = 8, ///< discard all non reference + AVDISCARD_BIDIR = 16, ///< discard all bidirectional frames + AVDISCARD_NONINTRA= 24, ///< discard all non intra frames + AVDISCARD_NONKEY = 32, ///< discard all frames except keyframes + AVDISCARD_ALL = 48, ///< discard all +}; + +enum AVAudioServiceType { + AV_AUDIO_SERVICE_TYPE_MAIN = 0, + AV_AUDIO_SERVICE_TYPE_EFFECTS = 1, + AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED = 2, + AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED = 3, + AV_AUDIO_SERVICE_TYPE_DIALOGUE = 4, + AV_AUDIO_SERVICE_TYPE_COMMENTARY = 5, + AV_AUDIO_SERVICE_TYPE_EMERGENCY = 6, + AV_AUDIO_SERVICE_TYPE_VOICE_OVER = 7, + AV_AUDIO_SERVICE_TYPE_KARAOKE = 8, + AV_AUDIO_SERVICE_TYPE_NB , ///< Not part of ABI +}; + +/** + * @ingroup lavc_encoding + */ +typedef struct RcOverride{ + int start_frame; + int end_frame; + int qscale; // If this is 0 then quality_factor will be used instead. + float quality_factor; +} RcOverride; + +#if FF_API_MAX_BFRAMES +/** + * @deprecated there is no libavcodec-wide limit on the number of B-frames + */ +#define FF_MAX_B_FRAMES 16 +#endif + +/* encoding support + These flags can be passed in AVCodecContext.flags before initialization. + Note: Not everything is supported yet. +*/ + +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define AV_CODEC_FLAG_UNALIGNED (1 << 0) +/** + * Use fixed qscale. + */ +#define AV_CODEC_FLAG_QSCALE (1 << 1) +/** + * 4 MV per MB allowed / advanced prediction for H.263. + */ +#define AV_CODEC_FLAG_4MV (1 << 2) +/** + * Output even those frames that might be corrupted. + */ +#define AV_CODEC_FLAG_OUTPUT_CORRUPT (1 << 3) +/** + * Use qpel MC. + */ +#define AV_CODEC_FLAG_QPEL (1 << 4) +/** + * Use internal 2pass ratecontrol in first pass mode. 
+ */ +#define AV_CODEC_FLAG_PASS1 (1 << 9) +/** + * Use internal 2pass ratecontrol in second pass mode. + */ +#define AV_CODEC_FLAG_PASS2 (1 << 10) +/** + * loop filter. + */ +#define AV_CODEC_FLAG_LOOP_FILTER (1 << 11) +/** + * Only decode/encode grayscale. + */ +#define AV_CODEC_FLAG_GRAY (1 << 13) +/** + * error[?] variables will be set during encoding. + */ +#define AV_CODEC_FLAG_PSNR (1 << 15) +/** + * Input bitstream might be truncated at a random location + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG_TRUNCATED (1 << 16) +/** + * Use interlaced DCT. + */ +#define AV_CODEC_FLAG_INTERLACED_DCT (1 << 18) +/** + * Force low delay. + */ +#define AV_CODEC_FLAG_LOW_DELAY (1 << 19) +/** + * Place global headers in extradata instead of every keyframe. + */ +#define AV_CODEC_FLAG_GLOBAL_HEADER (1 << 22) +/** + * Use only bitexact stuff (except (I)DCT). + */ +#define AV_CODEC_FLAG_BITEXACT (1 << 23) +/* Fx : Flag for H.263+ extra options */ +/** + * H.263 advanced intra coding / MPEG-4 AC prediction + */ +#define AV_CODEC_FLAG_AC_PRED (1 << 24) +/** + * interlaced motion estimation + */ +#define AV_CODEC_FLAG_INTERLACED_ME (1 << 29) +#define AV_CODEC_FLAG_CLOSED_GOP (1U << 31) + +/** + * Allow non spec compliant speedup tricks. + */ +#define AV_CODEC_FLAG2_FAST (1 << 0) +/** + * Skip bitstream encoding. + */ +#define AV_CODEC_FLAG2_NO_OUTPUT (1 << 2) +/** + * Place global headers at every keyframe instead of in extradata. + */ +#define AV_CODEC_FLAG2_LOCAL_HEADER (1 << 3) + +/** + * timecode is in drop frame format. DEPRECATED!!!! + */ +#define AV_CODEC_FLAG2_DROP_FRAME_TIMECODE (1 << 13) + +/** + * Input bitstream might be truncated at a packet boundaries + * instead of only at frame boundaries. + */ +#define AV_CODEC_FLAG2_CHUNKS (1 << 15) +/** + * Discard cropping information from SPS. 
+ */ +#define AV_CODEC_FLAG2_IGNORE_CROP (1 << 16) + +/** + * Show all frames before the first keyframe + */ +#define AV_CODEC_FLAG2_SHOW_ALL (1 << 22) +/** + * Export motion vectors through frame side data + */ +#define AV_CODEC_FLAG2_EXPORT_MVS (1 << 28) +/** + * Do not skip samples and export skip information as frame side data + */ +#define AV_CODEC_FLAG2_SKIP_MANUAL (1 << 29) +/** + * Do not reset ASS ReadOrder field on flush (subtitles decoding) + */ +#define AV_CODEC_FLAG2_RO_FLUSH_NOOP (1 << 30) + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +/** + * Decoder can use draw_horiz_band callback. + */ +#define AV_CODEC_CAP_DRAW_HORIZ_BAND (1 << 0) +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define AV_CODEC_CAP_DR1 (1 << 1) +#define AV_CODEC_CAP_TRUNCATED (1 << 3) +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. 
If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define AV_CODEC_CAP_DELAY (1 << 5) +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define AV_CODEC_CAP_SMALL_LAST_FRAME (1 << 6) + +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). + */ +#define AV_CODEC_CAP_HWACCEL_VDPAU (1 << 7) +#endif + +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define AV_CODEC_CAP_SUBFRAMES (1 << 8) +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define AV_CODEC_CAP_EXPERIMENTAL (1 << 9) +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define AV_CODEC_CAP_CHANNEL_CONF (1 << 10) +/** + * Codec supports frame-level multithreading. + */ +#define AV_CODEC_CAP_FRAME_THREADS (1 << 12) +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define AV_CODEC_CAP_SLICE_THREADS (1 << 13) +/** + * Codec supports changed parameters at any point. + */ +#define AV_CODEC_CAP_PARAM_CHANGE (1 << 14) +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define AV_CODEC_CAP_AUTO_THREADS (1 << 15) +/** + * Audio encoder supports receiving a different number of samples in each call. 
+ */ +#define AV_CODEC_CAP_VARIABLE_FRAME_SIZE (1 << 16) +/** + * Codec is intra only. + */ +#define AV_CODEC_CAP_INTRA_ONLY 0x40000000 +/** + * Codec is lossless. + */ +#define AV_CODEC_CAP_LOSSLESS 0x80000000 + + +#if FF_API_WITHOUT_PREFIX +/** + * Allow decoders to produce frames with data planes that are not aligned + * to CPU requirements (e.g. due to cropping). + */ +#define CODEC_FLAG_UNALIGNED AV_CODEC_FLAG_UNALIGNED +#define CODEC_FLAG_QSCALE AV_CODEC_FLAG_QSCALE +#define CODEC_FLAG_4MV AV_CODEC_FLAG_4MV +#define CODEC_FLAG_OUTPUT_CORRUPT AV_CODEC_FLAG_OUTPUT_CORRUPT +#define CODEC_FLAG_QPEL AV_CODEC_FLAG_QPEL +#if FF_API_GMC +/** + * @deprecated use the "gmc" private option of the libxvid encoder + */ +#define CODEC_FLAG_GMC 0x0020 ///< Use GMC. +#endif +#if FF_API_MV0 +/** + * @deprecated use the flag "mv0" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_MV0 0x0040 +#endif +#if FF_API_INPUT_PRESERVED +/** + * @deprecated passing reference-counted frames to the encoders replaces this + * flag + */ +#define CODEC_FLAG_INPUT_PRESERVED 0x0100 +#endif +#define CODEC_FLAG_PASS1 AV_CODEC_FLAG_PASS1 +#define CODEC_FLAG_PASS2 AV_CODEC_FLAG_PASS2 +#define CODEC_FLAG_GRAY AV_CODEC_FLAG_GRAY +#if FF_API_EMU_EDGE +/** + * @deprecated edges are not used/required anymore. I.e. this flag is now always + * set. 
+ */ +#define CODEC_FLAG_EMU_EDGE 0x4000 +#endif +#define CODEC_FLAG_PSNR AV_CODEC_FLAG_PSNR +#define CODEC_FLAG_TRUNCATED AV_CODEC_FLAG_TRUNCATED + +#if FF_API_NORMALIZE_AQP +/** + * @deprecated use the flag "naq" in the "mpv_flags" private option of the + * mpegvideo encoders + */ +#define CODEC_FLAG_NORMALIZE_AQP 0x00020000 +#endif +#define CODEC_FLAG_INTERLACED_DCT AV_CODEC_FLAG_INTERLACED_DCT +#define CODEC_FLAG_LOW_DELAY AV_CODEC_FLAG_LOW_DELAY +#define CODEC_FLAG_GLOBAL_HEADER AV_CODEC_FLAG_GLOBAL_HEADER +#define CODEC_FLAG_BITEXACT AV_CODEC_FLAG_BITEXACT +#define CODEC_FLAG_AC_PRED AV_CODEC_FLAG_AC_PRED +#define CODEC_FLAG_LOOP_FILTER AV_CODEC_FLAG_LOOP_FILTER +#define CODEC_FLAG_INTERLACED_ME AV_CODEC_FLAG_INTERLACED_ME +#define CODEC_FLAG_CLOSED_GOP AV_CODEC_FLAG_CLOSED_GOP +#define CODEC_FLAG2_FAST AV_CODEC_FLAG2_FAST +#define CODEC_FLAG2_NO_OUTPUT AV_CODEC_FLAG2_NO_OUTPUT +#define CODEC_FLAG2_LOCAL_HEADER AV_CODEC_FLAG2_LOCAL_HEADER +#define CODEC_FLAG2_DROP_FRAME_TIMECODE AV_CODEC_FLAG2_DROP_FRAME_TIMECODE +#define CODEC_FLAG2_IGNORE_CROP AV_CODEC_FLAG2_IGNORE_CROP + +#define CODEC_FLAG2_CHUNKS AV_CODEC_FLAG2_CHUNKS +#define CODEC_FLAG2_SHOW_ALL AV_CODEC_FLAG2_SHOW_ALL +#define CODEC_FLAG2_EXPORT_MVS AV_CODEC_FLAG2_EXPORT_MVS +#define CODEC_FLAG2_SKIP_MANUAL AV_CODEC_FLAG2_SKIP_MANUAL + +/* Unsupported options : + * Syntax Arithmetic coding (SAC) + * Reference Picture Selection + * Independent Segment Decoding */ +/* /Fx */ +/* codec capabilities */ + +#define CODEC_CAP_DRAW_HORIZ_BAND AV_CODEC_CAP_DRAW_HORIZ_BAND ///< Decoder can use draw_horiz_band callback. +/** + * Codec uses get_buffer() for allocating buffers and supports custom allocators. + * If not set, it might not use get_buffer() at all or use operations that + * assume the buffer was allocated by avcodec_default_get_buffer. + */ +#define CODEC_CAP_DR1 AV_CODEC_CAP_DR1 +#define CODEC_CAP_TRUNCATED AV_CODEC_CAP_TRUNCATED +#if FF_API_XVMC +/* Codec can export data for HW decoding. 
This flag indicates that + * the codec would call get_format() with list that might contain HW accelerated + * pixel formats (XvMC, VDPAU, VAAPI, etc). The application can pick any of them + * including raw image format. + * The application can use the passed context to determine bitstream version, + * chroma format, resolution etc. + */ +#define CODEC_CAP_HWACCEL 0x0010 +#endif /* FF_API_XVMC */ +/** + * Encoder or decoder requires flushing with NULL input at the end in order to + * give the complete and correct output. + * + * NOTE: If this flag is not set, the codec is guaranteed to never be fed with + * with NULL data. The user can still send NULL data to the public encode + * or decode function, but libavcodec will not pass it along to the codec + * unless this flag is set. + * + * Decoders: + * The decoder has a non-zero delay and needs to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to get the delayed data until the decoder no longer + * returns frames. + * + * Encoders: + * The encoder needs to be fed with NULL data at the end of encoding until the + * encoder no longer returns data. + * + * NOTE: For encoders implementing the AVCodec.encode2() function, setting this + * flag also means that the encoder must set the pts and duration for + * each output packet. If this flag is not set, the pts and duration will + * be determined by libavcodec from the input frame. + */ +#define CODEC_CAP_DELAY AV_CODEC_CAP_DELAY +/** + * Codec can be fed a final frame with a smaller size. + * This can be used to prevent truncation of the last audio samples. + */ +#define CODEC_CAP_SMALL_LAST_FRAME AV_CODEC_CAP_SMALL_LAST_FRAME +#if FF_API_CAP_VDPAU +/** + * Codec can export data for HW decoding (VDPAU). 
+ */ +#define CODEC_CAP_HWACCEL_VDPAU AV_CODEC_CAP_HWACCEL_VDPAU +#endif +/** + * Codec can output multiple frames per AVPacket + * Normally demuxers return one frame at a time, demuxers which do not do + * are connected to a parser to split what they return into proper frames. + * This flag is reserved to the very rare category of codecs which have a + * bitstream that cannot be split into frames without timeconsuming + * operations like full decoding. Demuxers carrying such bitstreams thus + * may return multiple frames in a packet. This has many disadvantages like + * prohibiting stream copy in many cases thus it should only be considered + * as a last resort. + */ +#define CODEC_CAP_SUBFRAMES AV_CODEC_CAP_SUBFRAMES +/** + * Codec is experimental and is thus avoided in favor of non experimental + * encoders + */ +#define CODEC_CAP_EXPERIMENTAL AV_CODEC_CAP_EXPERIMENTAL +/** + * Codec should fill in channel configuration and samplerate instead of container + */ +#define CODEC_CAP_CHANNEL_CONF AV_CODEC_CAP_CHANNEL_CONF +#if FF_API_NEG_LINESIZES +/** + * @deprecated no codecs use this capability + */ +#define CODEC_CAP_NEG_LINESIZES 0x0800 +#endif +/** + * Codec supports frame-level multithreading. + */ +#define CODEC_CAP_FRAME_THREADS AV_CODEC_CAP_FRAME_THREADS +/** + * Codec supports slice-based (or partition-based) multithreading. + */ +#define CODEC_CAP_SLICE_THREADS AV_CODEC_CAP_SLICE_THREADS +/** + * Codec supports changed parameters at any point. + */ +#define CODEC_CAP_PARAM_CHANGE AV_CODEC_CAP_PARAM_CHANGE +/** + * Codec supports avctx->thread_count == 0 (auto). + */ +#define CODEC_CAP_AUTO_THREADS AV_CODEC_CAP_AUTO_THREADS +/** + * Audio encoder supports receiving a different number of samples in each call. + */ +#define CODEC_CAP_VARIABLE_FRAME_SIZE AV_CODEC_CAP_VARIABLE_FRAME_SIZE +/** + * Codec is intra only. + */ +#define CODEC_CAP_INTRA_ONLY AV_CODEC_CAP_INTRA_ONLY +/** + * Codec is lossless. 
+ */ +#define CODEC_CAP_LOSSLESS AV_CODEC_CAP_LOSSLESS + +/** + * HWAccel is experimental and is thus avoided in favor of non experimental + * codecs + */ +#define HWACCEL_CODEC_CAP_EXPERIMENTAL 0x0200 +#endif /* FF_API_WITHOUT_PREFIX */ + +#if FF_API_MB_TYPE +//The following defines may change, don't expect compatibility if you use them. +#define MB_TYPE_INTRA4x4 0x0001 +#define MB_TYPE_INTRA16x16 0x0002 //FIXME H.264-specific +#define MB_TYPE_INTRA_PCM 0x0004 //FIXME H.264-specific +#define MB_TYPE_16x16 0x0008 +#define MB_TYPE_16x8 0x0010 +#define MB_TYPE_8x16 0x0020 +#define MB_TYPE_8x8 0x0040 +#define MB_TYPE_INTERLACED 0x0080 +#define MB_TYPE_DIRECT2 0x0100 //FIXME +#define MB_TYPE_ACPRED 0x0200 +#define MB_TYPE_GMC 0x0400 +#define MB_TYPE_SKIP 0x0800 +#define MB_TYPE_P0L0 0x1000 +#define MB_TYPE_P1L0 0x2000 +#define MB_TYPE_P0L1 0x4000 +#define MB_TYPE_P1L1 0x8000 +#define MB_TYPE_L0 (MB_TYPE_P0L0 | MB_TYPE_P1L0) +#define MB_TYPE_L1 (MB_TYPE_P0L1 | MB_TYPE_P1L1) +#define MB_TYPE_L0L1 (MB_TYPE_L0 | MB_TYPE_L1) +#define MB_TYPE_QUANT 0x00010000 +#define MB_TYPE_CBP 0x00020000 +// Note bits 24-31 are reserved for codec specific use (H.264 ref0, MPEG-1 0mv, ...) +#endif + +/** + * Pan Scan area. + * This specifies the area which should be displayed. + * Note there may be multiple such areas for one frame. + */ +typedef struct AVPanScan{ + /** + * id + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int id; + + /** + * width and height in 1/16 pel + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int width; + int height; + + /** + * position of the top left corner in 1/16 pel for up to 3 fields/frames + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int16_t position[3][2]; +}AVPanScan; + +/** + * This structure describes the bitrate properties of an encoded bitstream. It + * roughly corresponds to a subset the VBV parameters for MPEG-2 or HRD + * parameters for H.264/HEVC. 
+ */ +typedef struct AVCPBProperties { + /** + * Maximum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int max_bitrate; + /** + * Minimum bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int min_bitrate; + /** + * Average bitrate of the stream, in bits per second. + * Zero if unknown or unspecified. + */ + int avg_bitrate; + + /** + * The size of the buffer to which the ratecontrol is applied, in bits. + * Zero if unknown or unspecified. + */ + int buffer_size; + + /** + * The delay between the time the packet this structure is associated with + * is received and the time when it should be decoded, in periods of a 27MHz + * clock. + * + * UINT64_MAX when unknown or unspecified. + */ + uint64_t vbv_delay; +} AVCPBProperties; + +#if FF_API_QSCALE_TYPE +#define FF_QSCALE_TYPE_MPEG1 0 +#define FF_QSCALE_TYPE_MPEG2 1 +#define FF_QSCALE_TYPE_H264 2 +#define FF_QSCALE_TYPE_VP56 3 +#endif + +/** + * The decoder will keep a reference to the frame and may reuse it later. + */ +#define AV_GET_BUFFER_FLAG_REF (1 << 0) + +/** + * @defgroup lavc_packet AVPacket + * + * Types and functions for working with AVPacket. 
+ * @{ + */ +enum AVPacketSideDataType { + AV_PKT_DATA_PALETTE, + AV_PKT_DATA_NEW_EXTRADATA, + + /** + * An AV_PKT_DATA_PARAM_CHANGE side data packet is laid out as follows: + * @code + * u32le param_flags + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT) + * s32le channel_count + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT) + * u64le channel_layout + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE) + * s32le sample_rate + * if (param_flags & AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS) + * s32le width + * s32le height + * @endcode + */ + AV_PKT_DATA_PARAM_CHANGE, + + /** + * An AV_PKT_DATA_H263_MB_INFO side data packet contains a number of + * structures with info about macroblocks relevant to splitting the + * packet into smaller packets on macroblock edges (e.g. as for RFC 2190). + * That is, it does not necessarily contain info about all macroblocks, + * as long as the distance between macroblocks in the info is smaller + * than the target payload size. + * Each MB info structure is 12 bytes, and is laid out as follows: + * @code + * u32le bit offset from the start of the packet + * u8 current quantizer at the start of the macroblock + * u8 GOB number + * u16le macroblock address within the GOB + * u8 horizontal MV predictor + * u8 vertical MV predictor + * u8 horizontal MV predictor for block number 3 + * u8 vertical MV predictor for block number 3 + * @endcode + */ + AV_PKT_DATA_H263_MB_INFO, + + /** + * This side data should be associated with an audio stream and contains + * ReplayGain information in form of the AVReplayGain struct. + */ + AV_PKT_DATA_REPLAYGAIN, + + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the decoded video frames for + * correct presentation. + * + * See libavutil/display.h for a detailed description of the data. 
+ */ + AV_PKT_DATA_DISPLAYMATRIX, + + /** + * This side data should be associated with a video stream and contains + * Stereoscopic 3D information in form of the AVStereo3D struct. + */ + AV_PKT_DATA_STEREO3D, + + /** + * This side data should be associated with an audio stream and corresponds + * to enum AVAudioServiceType. + */ + AV_PKT_DATA_AUDIO_SERVICE_TYPE, + + /** + * This side data contains quality related information from the encoder. + * @code + * u32le quality factor of the compressed frame. Allowed range is between 1 (good) and FF_LAMBDA_MAX (bad). + * u8 picture type + * u8 error count + * u16 reserved + * u64le[error count] sum of squared differences between encoder in and output + * @endcode + */ + AV_PKT_DATA_QUALITY_STATS, + + /** + * This side data contains an integer value representing the stream index + * of a "fallback" track. A fallback track indicates an alternate + * track to use when the current track can not be decoded for some reason. + * e.g. no decoder available for codec. + */ + AV_PKT_DATA_FALLBACK_TRACK, + + /** + * This side data corresponds to the AVCPBProperties struct. + */ + AV_PKT_DATA_CPB_PROPERTIES, + + /** + * Recommmends skipping the specified number of samples + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_PKT_DATA_SKIP_SAMPLES=70, + + /** + * An AV_PKT_DATA_JP_DUALMONO side data packet indicates that + * the packet may contain "dual mono" audio specific to Japanese DTV + * and if it is true, recommends only the selected channel to be used. + * @code + * u8 selected channels (0=mail/left, 1=sub/right, 2=both) + * @endcode + */ + AV_PKT_DATA_JP_DUALMONO, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. 
+ */ + AV_PKT_DATA_STRINGS_METADATA, + + /** + * Subtitle event position + * @code + * u32le x1 + * u32le y1 + * u32le x2 + * u32le y2 + * @endcode + */ + AV_PKT_DATA_SUBTITLE_POSITION, + + /** + * Data found in BlockAdditional element of matroska container. There is + * no end marker for the data, so it is required to rely on the side data + * size to recognize the end. 8 byte id (as found in BlockAddId) followed + * by data. + */ + AV_PKT_DATA_MATROSKA_BLOCKADDITIONAL, + + /** + * The optional first identifier line of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_IDENTIFIER, + + /** + * The optional settings (rendering instructions) that immediately + * follow the timestamp specifier of a WebVTT cue. + */ + AV_PKT_DATA_WEBVTT_SETTINGS, + + /** + * A list of zero terminated key/value strings. There is no end marker for + * the list, so it is required to rely on the side data size to stop. This + * side data includes updated metadata which appeared in the stream. + */ + AV_PKT_DATA_METADATA_UPDATE, + + /** + * MPEGTS stream ID, this is required to pass the stream ID + * information from the demuxer to the corresponding muxer. + */ + AV_PKT_DATA_MPEGTS_STREAM_ID, + + /** + * Mastering display metadata (based on SMPTE-2086:2014). This metadata + * should be associated with a video stream and containts data in the form + * of the AVMasteringDisplayMetadata struct. + */ + AV_PKT_DATA_MASTERING_DISPLAY_METADATA +}; + +#define AV_PKT_DATA_QUALITY_FACTOR AV_PKT_DATA_QUALITY_STATS //DEPRECATED + +typedef struct AVPacketSideData { + uint8_t *data; + int size; + enum AVPacketSideDataType type; +} AVPacketSideData; + +/** + * This structure stores compressed data. It is typically exported by demuxers + * and then passed as input to decoders, or received as output from encoders and + * then passed to muxers. + * + * For video, it should typically contain one compressed frame. For audio it may + * contain several compressed frames. 
Encoders are allowed to output empty + * packets, with no compressed data, containing only side data + * (e.g. to update some stream parameters at the end of encoding). + * + * AVPacket is one of the few structs in FFmpeg, whose size is a part of public + * ABI. Thus it may be allocated on stack and no new fields can be added to it + * without libavcodec and libavformat major bump. + * + * The semantics of data ownership depends on the buf field. + * If it is set, the packet data is dynamically allocated and is + * valid indefinitely until a call to av_packet_unref() reduces the + * reference count to 0. + * + * If the buf field is not set av_packet_ref() would make a copy instead + * of increasing the reference count. + * + * The side data is always allocated with av_malloc(), copied by + * av_packet_ref() and freed by av_packet_unref(). + * + * @see av_packet_ref + * @see av_packet_unref + */ +typedef struct AVPacket { + /** + * A reference to the reference-counted buffer where the packet data is + * stored. + * May be NULL, then the packet data is not reference-counted. + */ + AVBufferRef *buf; + /** + * Presentation timestamp in AVStream->time_base units; the time at which + * the decompressed packet will be presented to the user. + * Can be AV_NOPTS_VALUE if it is not stored in the file. + * pts MUST be larger or equal to dts as presentation cannot happen before + * decompression, unless one wants to view hex dumps. Some formats misuse + * the terms dts and pts/cts to mean something different. Such timestamps + * must be converted to true pts/dts before they are stored in AVPacket. + */ + int64_t pts; + /** + * Decompression timestamp in AVStream->time_base units; the time at which + * the packet is decompressed. + * Can be AV_NOPTS_VALUE if it is not stored in the file. 
+ */ + int64_t dts; + uint8_t *data; + int size; + int stream_index; + /** + * A combination of AV_PKT_FLAG values + */ + int flags; + /** + * Additional packet data that can be provided by the container. + * Packet can contain several types of side information. + */ + AVPacketSideData *side_data; + int side_data_elems; + + /** + * Duration of this packet in AVStream->time_base units, 0 if unknown. + * Equals next_pts - this_pts in presentation order. + */ + int64_t duration; + + int64_t pos; ///< byte position in stream, -1 if unknown + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated Same as the duration field, but as int64_t. This was required + * for Matroska subtitles, whose duration values could overflow when the + * duration field was still an int. + */ + attribute_deprecated + int64_t convergence_duration; +#endif +} AVPacket; +#define AV_PKT_FLAG_KEY 0x0001 ///< The packet contains a keyframe +#define AV_PKT_FLAG_CORRUPT 0x0002 ///< The packet content is corrupted + +enum AVSideDataParamChangeFlags { + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_COUNT = 0x0001, + AV_SIDE_DATA_PARAM_CHANGE_CHANNEL_LAYOUT = 0x0002, + AV_SIDE_DATA_PARAM_CHANGE_SAMPLE_RATE = 0x0004, + AV_SIDE_DATA_PARAM_CHANGE_DIMENSIONS = 0x0008, +}; +/** + * @} + */ + +struct AVCodecInternal; + +enum AVFieldOrder { + AV_FIELD_UNKNOWN, + AV_FIELD_PROGRESSIVE, + AV_FIELD_TT, //< Top coded_first, top displayed first + AV_FIELD_BB, //< Bottom coded first, bottom displayed first + AV_FIELD_TB, //< Top coded first, bottom displayed first + AV_FIELD_BT, //< Bottom coded first, top displayed first +}; + +/** + * main external API structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * Please use AVOptions (av_opt* / av_set/get*()) to access these fields from user + * applications. 
+ * The name string for AVOptions options matches the associated command line + * parameter name and can be found in libavcodec/options_table.h + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + * sizeof(AVCodecContext) must not be used outside libav*. + */ +typedef struct AVCodecContext { + /** + * information on struct for av_log + * - set by avcodec_alloc_context3 + */ + const AVClass *av_class; + int log_level_offset; + + enum AVMediaType codec_type; /* see AVMEDIA_TYPE_xxx */ + const struct AVCodec *codec; +#if FF_API_CODEC_NAME + /** + * @deprecated this field is not used for anything in libavcodec + */ + attribute_deprecated + char codec_name[32]; +#endif + enum AVCodecID codec_id; /* see AV_CODEC_ID_xxx */ + + /** + * fourcc (LSB first, so "ABCD" -> ('D'<<24) + ('C'<<16) + ('B'<<8) + 'A'). + * This is used to work around some encoder bugs. + * A demuxer should set this to what is stored in the field used to identify the codec. + * If there are multiple such fields in a container then the demuxer should choose the one + * which maximizes the information about the used codec. + * If the codec tag field in a container is larger than 32 bits then the demuxer should + * remap the longer ID to 32 bits with a table or other structure. Alternatively a new + * extra_codec_tag + size could be added but for this a clear advantage must be demonstrated + * first. + * - encoding: Set by user, if not then the default based on codec_id will be used. + * - decoding: Set by user, will be converted to uppercase by libavcodec during init. + */ + unsigned int codec_tag; + +#if FF_API_STREAM_CODEC_TAG + /** + * @deprecated this field is unused + */ + attribute_deprecated + unsigned int stream_codec_tag; +#endif + + void *priv_data; + + /** + * Private context used for internal data. + * + * Unlike priv_data, this is not codec-specific. It is used in general + * libavcodec functions. 
+ */ + struct AVCodecInternal *internal; + + /** + * Private data of the user, can be used to carry app specific stuff. + * - encoding: Set by user. + * - decoding: Set by user. + */ + void *opaque; + + /** + * the average bitrate + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: Set by user, may be overwritten by libavcodec + * if this info is available in the stream + */ + int64_t bit_rate; + + /** + * number of bits the bitstream is allowed to diverge from the reference. + * the reference can be CBR (for CBR pass1) or VBR (for pass2) + * - encoding: Set by user; unused for constant quantizer encoding. + * - decoding: unused + */ + int bit_rate_tolerance; + + /** + * Global quality for codecs which cannot change it per frame. + * This should be proportional to MPEG-1/2/4 qscale. + * - encoding: Set by user. + * - decoding: unused + */ + int global_quality; + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int compression_level; +#define FF_COMPRESSION_DEFAULT -1 + + /** + * AV_CODEC_FLAG_*. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags; + + /** + * AV_CODEC_FLAG2_* + * - encoding: Set by user. + * - decoding: Set by user. + */ + int flags2; + + /** + * some codecs need / can use extradata like Huffman tables. + * MJPEG: Huffman tables + * rv10: additional flags + * MPEG-4: global headers (they can be in the bitstream or here) + * The allocated memory should be AV_INPUT_BUFFER_PADDING_SIZE bytes larger + * than extradata_size to avoid problems if it is read with the bitstream reader. + * The bytewise contents of extradata must not depend on the architecture or CPU endianness. + * - encoding: Set/allocated/freed by libavcodec. + * - decoding: Set/allocated/freed by user. + */ + uint8_t *extradata; + int extradata_size; + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. 
For fixed-fps content, + * timebase should be 1/framerate and timestamp increments should be + * identically 1. + * This often, but not always is the inverse of the frame rate or field rate + * for video. 1/time_base is not the average frame rate if the frame rate is not + * constant. + * + * Like containers, elementary streams also can store timestamps, 1/time_base + * is the unit in which these timestamps are specified. + * As example of such codec time base see ISO/IEC 14496-2:2001(E) + * vop_time_increment_resolution and fixed_vop_rate + * (fixed_vop_rate == 0 implies that it is different from the framerate) + * + * - encoding: MUST be set by user. + * - decoding: the use of this field for decoding is deprecated. + * Use framerate instead. + */ + AVRational time_base; + + /** + * For some codecs, the time base is closer to the field rate than the frame rate. + * Most notably, H.264 and MPEG-2 specify time_base as half of frame duration + * if no telecine is used ... + * + * Set to time_base ticks per frame. Default 1, e.g., H.264/MPEG-2 set it to 2. + */ + int ticks_per_frame; + + /** + * Codec delay. + * + * Encoding: Number of frames delay there will be from the encoder input to + * the decoder output. (we assume the decoder matches the spec) + * Decoding: Number of frames delay in addition to what a standard decoder + * as specified in the spec would produce. + * + * Video: + * Number of frames the decoded output will be delayed relative to the + * encoded input. + * + * Audio: + * For encoding, this field is unused (see initial_padding). + * + * For decoding, this is the number of samples the decoder needs to + * output before the decoder's output is valid. When seeking, you should + * start decoding this many samples prior to your desired seek point. + * + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int delay; + + + /* video only */ + /** + * picture width / height. 
+ * + * @note Those fields may not match the values of the last + * AVFrame output by avcodec_decode_video2 due frame + * reordering. + * + * - encoding: MUST be set by user. + * - decoding: May be set by the user before opening the decoder if known e.g. + * from the container. Some decoders will require the dimensions + * to be set by the caller. During decoding, the decoder may + * overwrite those values as required while parsing the data. + */ + int width, height; + + /** + * Bitstream width / height, may be different from width/height e.g. when + * the decoded frame is cropped before being output or lowres is enabled. + * + * @note Those field may not match the value of the last + * AVFrame output by avcodec_receive_frame() due frame + * reordering. + * + * - encoding: unused + * - decoding: May be set by the user before opening the decoder if known + * e.g. from the container. During decoding, the decoder may + * overwrite those values as required while parsing the data. + */ + int coded_width, coded_height; + +#if FF_API_ASPECT_EXTENDED +#define FF_ASPECT_EXTENDED 15 +#endif + + /** + * the number of pictures in a group of pictures, or 0 for intra_only + * - encoding: Set by user. + * - decoding: unused + */ + int gop_size; + + /** + * Pixel format, see AV_PIX_FMT_xxx. + * May be set by the demuxer if known from headers. + * May be overridden by the decoder if it knows better. + * + * @note This field may not match the value of the last + * AVFrame output by avcodec_receive_frame() due frame + * reordering. + * + * - encoding: Set by user. + * - decoding: Set by user if known, overridden by libavcodec while + * parsing the data. + */ + enum AVPixelFormat pix_fmt; + +#if FF_API_MOTION_EST + /** + * This option does nothing + * @deprecated use codec private options instead + */ + attribute_deprecated int me_method; +#endif + + /** + * If non NULL, 'draw_horiz_band' is called by the libavcodec + * decoder to draw a horizontal band. It improves cache usage. 
Not + * all codecs can do that. You must check the codec capabilities + * beforehand. + * When multithreading is used, it may be called from multiple threads + * at the same time; threads might draw different parts of the same AVFrame, + * or multiple AVFrames, and there is no guarantee that slices will be drawn + * in order. + * The function is also used by hardware acceleration APIs. + * It is called at least once during frame decoding to pass + * the data needed for hardware render. + * In that mode instead of pixel data, AVFrame points to + * a structure specific to the acceleration API. The application + * reads the structure and can change some fields to indicate progress + * or mark state. + * - encoding: unused + * - decoding: Set by user. + * @param height the height of the slice + * @param y the y position of the slice + * @param type 1->top field, 2->bottom field, 3->frame + * @param offset offset into the AVFrame.data from which the slice should be read + */ + void (*draw_horiz_band)(struct AVCodecContext *s, + const AVFrame *src, int offset[AV_NUM_DATA_POINTERS], + int y, int type, int height); + + /** + * callback to negotiate the pixelFormat + * @param fmt is the list of formats which are supported by the codec, + * it is terminated by -1 as 0 is a valid format, the formats are ordered by quality. + * The first is always the native one. + * @note The callback may be called again immediately if initialization for + * the selected (hardware-accelerated) pixel format failed. + * @warning Behavior is undefined if the callback returns a value not + * in the fmt list of formats. + * @return the chosen format + * - encoding: unused + * - decoding: Set by user, if not set the native format will be chosen. + */ + enum AVPixelFormat (*get_format)(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + + /** + * maximum number of B-frames between non-B-frames + * Note: The output will be delayed by max_b_frames+1 relative to the input. 
+ * - encoding: Set by user. + * - decoding: unused + */ + int max_b_frames; + + /** + * qscale factor between IP and B-frames + * If > 0 then the last P-frame quantizer will be used (q= lastp_q*factor+offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_factor; + +#if FF_API_RC_STRATEGY + /** @deprecated use codec private option instead */ + attribute_deprecated int rc_strategy; +#define FF_RC_STRATEGY_XVID 1 +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_frame_strategy; +#endif + + /** + * qscale offset between IP and B-frames + * - encoding: Set by user. + * - decoding: unused + */ + float b_quant_offset; + + /** + * Size of the frame reordering buffer in the decoder. + * For MPEG-2 it is 1 IPB or 0 low delay IP. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int has_b_frames; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int mpeg_quant; +#endif + + /** + * qscale factor between P- and I-frames + * If > 0 then the last P-frame quantizer will be used (q = lastp_q * factor + offset). + * If < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset). + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_factor; + + /** + * qscale offset between P and I-frames + * - encoding: Set by user. + * - decoding: unused + */ + float i_quant_offset; + + /** + * luminance masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float lumi_masking; + + /** + * temporary complexity masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float temporal_cplx_masking; + + /** + * spatial complexity masking (0-> disabled) + * - encoding: Set by user. 
+ * - decoding: unused + */ + float spatial_cplx_masking; + + /** + * p block masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float p_masking; + + /** + * darkness masking (0-> disabled) + * - encoding: Set by user. + * - decoding: unused + */ + float dark_masking; + + /** + * slice count + * - encoding: Set by libavcodec. + * - decoding: Set by user (or 0). + */ + int slice_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int prediction_method; +#define FF_PRED_LEFT 0 +#define FF_PRED_PLANE 1 +#define FF_PRED_MEDIAN 2 +#endif + + /** + * slice offsets in the frame in bytes + * - encoding: Set/allocated by libavcodec. + * - decoding: Set/allocated by user (or NULL). + */ + int *slice_offset; + + /** + * sample aspect ratio (0 if unknown) + * That is the width of a pixel divided by the height of the pixel. + * Numerator and denominator must be relatively prime and smaller than 256 for some video standards. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + AVRational sample_aspect_ratio; + + /** + * motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_cmp; + /** + * subpixel motion estimation comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_sub_cmp; + /** + * macroblock comparison function (not supported yet) + * - encoding: Set by user. + * - decoding: unused + */ + int mb_cmp; + /** + * interlaced DCT comparison function + * - encoding: Set by user. 
+ * - decoding: unused + */ + int ildct_cmp; +#define FF_CMP_SAD 0 +#define FF_CMP_SSE 1 +#define FF_CMP_SATD 2 +#define FF_CMP_DCT 3 +#define FF_CMP_PSNR 4 +#define FF_CMP_BIT 5 +#define FF_CMP_RD 6 +#define FF_CMP_ZERO 7 +#define FF_CMP_VSAD 8 +#define FF_CMP_VSSE 9 +#define FF_CMP_NSSE 10 +#define FF_CMP_W53 11 +#define FF_CMP_W97 12 +#define FF_CMP_DCTMAX 13 +#define FF_CMP_DCT264 14 +#define FF_CMP_CHROMA 256 + + /** + * ME diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int dia_size; + + /** + * amount of previous MV predictors (2a+1 x 2a+1 square) + * - encoding: Set by user. + * - decoding: unused + */ + int last_predictor_count; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int pre_me; +#endif + + /** + * motion estimation prepass comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int me_pre_cmp; + + /** + * ME prepass diamond size & shape + * - encoding: Set by user. + * - decoding: unused + */ + int pre_dia_size; + + /** + * subpel ME quality + * - encoding: Set by user. + * - decoding: unused + */ + int me_subpel_quality; + +#if FF_API_AFD + /** + * DTG active format information (additional aspect ratio + * information only used in DVB MPEG-2 transport streams) + * 0 if not set. + * + * - encoding: unused + * - decoding: Set by decoder. + * @deprecated Deprecated in favor of AVSideData + */ + attribute_deprecated int dtg_active_format; +#define FF_DTG_AFD_SAME 8 +#define FF_DTG_AFD_4_3 9 +#define FF_DTG_AFD_16_9 10 +#define FF_DTG_AFD_14_9 11 +#define FF_DTG_AFD_4_3_SP_14_9 13 +#define FF_DTG_AFD_16_9_SP_14_9 14 +#define FF_DTG_AFD_SP_4_3 15 +#endif /* FF_API_AFD */ + + /** + * maximum motion estimation search range in subpel units + * If 0 then no limit. + * + * - encoding: Set by user. 
+ * - decoding: unused + */ + int me_range; + +#if FF_API_QUANT_BIAS + /** + * @deprecated use encoder private option instead + */ + attribute_deprecated int intra_quant_bias; +#define FF_DEFAULT_QUANT_BIAS 999999 + + /** + * @deprecated use encoder private option instead + */ + attribute_deprecated int inter_quant_bias; +#endif + + /** + * slice flags + * - encoding: unused + * - decoding: Set by user. + */ + int slice_flags; +#define SLICE_FLAG_CODED_ORDER 0x0001 ///< draw_horiz_band() is called in coded order instead of display +#define SLICE_FLAG_ALLOW_FIELD 0x0002 ///< allow draw_horiz_band() with field slices (MPEG-2 field pics) +#define SLICE_FLAG_ALLOW_PLANE 0x0004 ///< allow draw_horiz_band() with 1 component at a time (SVQ1) + +#if FF_API_XVMC + /** + * XVideo Motion Acceleration + * - encoding: forbidden + * - decoding: set by decoder + * @deprecated XvMC doesn't need it anymore. + */ + attribute_deprecated int xvmc_acceleration; +#endif /* FF_API_XVMC */ + + /** + * macroblock decision mode + * - encoding: Set by user. + * - decoding: unused + */ + int mb_decision; +#define FF_MB_DECISION_SIMPLE 0 ///< uses mb_cmp +#define FF_MB_DECISION_BITS 1 ///< chooses the one which needs the fewest bits +#define FF_MB_DECISION_RD 2 ///< rate distortion + + /** + * custom intra quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. + */ + uint16_t *intra_matrix; + + /** + * custom inter quantization matrix + * - encoding: Set by user, can be NULL. + * - decoding: Set by libavcodec. 
+ */ + uint16_t *inter_matrix; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int scenechange_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int noise_reduction; +#endif + +#if FF_API_MPV_OPT + /** + * @deprecated this field is unused + */ + attribute_deprecated + int me_threshold; + + /** + * @deprecated this field is unused + */ + attribute_deprecated + int mb_threshold; +#endif + + /** + * precision of the intra DC coefficient - 8 + * - encoding: Set by user. + * - decoding: Set by libavcodec + */ + int intra_dc_precision; + + /** + * Number of macroblock rows at the top which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_top; + + /** + * Number of macroblock rows at the bottom which are skipped. + * - encoding: unused + * - decoding: Set by user. + */ + int skip_bottom; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + float border_masking; +#endif + + /** + * minimum MB Lagrange multiplier + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmin; + + /** + * maximum MB Lagrange multiplier + * - encoding: Set by user. + * - decoding: unused + */ + int mb_lmax; + +#if FF_API_PRIVATE_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int me_penalty_compensation; +#endif + + /** + * - encoding: Set by user. + * - decoding: unused + */ + int bidir_refine; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int brd_scale; +#endif + + /** + * minimum GOP size + * - encoding: Set by user. + * - decoding: unused + */ + int keyint_min; + + /** + * number of reference frames + * - encoding: Set by user. + * - decoding: Set by lavc. 
+ */ + int refs; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int chromaoffset; +#endif + +#if FF_API_UNUSED_MEMBERS + /** + * Multiplied by qscale for each frame and added to scene_change_score. + * - encoding: Set by user. + * - decoding: unused + */ + attribute_deprecated int scenechange_factor; +#endif + + /** + * Note: Value depends upon the compare function used for fullpel ME. + * - encoding: Set by user. + * - decoding: unused + */ + int mv0_threshold; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int b_sensitivity; +#endif + + /** + * Chromaticity coordinates of the source primaries. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorPrimaries color_primaries; + + /** + * Color Transfer Characteristic. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + /** + * MPEG vs JPEG YUV range. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + /** + * This defines the location of chroma samples. + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVChromaLocation chroma_sample_location; + + /** + * Number of slices. + * Indicates number of picture subdivisions. Used for parallelized + * decoding. + * - encoding: Set by user + * - decoding: unused + */ + int slices; + + /** Field order + * - encoding: set by libavcodec + * - decoding: Set by user. + */ + enum AVFieldOrder field_order; + + /* audio only */ + int sample_rate; ///< samples per second + int channels; ///< number of audio channels + + /** + * audio sample format + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + enum AVSampleFormat sample_fmt; ///< sample format + + /* The following data should not be initialized. */ + /** + * Number of samples per channel in an audio frame. + * + * - encoding: set by libavcodec in avcodec_open2(). Each submitted frame + * except the last must contain exactly frame_size samples per channel. + * May be 0 when the codec has AV_CODEC_CAP_VARIABLE_FRAME_SIZE set, then the + * frame size is not restricted. + * - decoding: may be set by some decoders to indicate constant frame size + */ + int frame_size; + + /** + * Frame counter, set by libavcodec. + * + * - decoding: total number of frames returned from the decoder so far. + * - encoding: total number of frames passed to the encoder so far. + * + * @note the counter is not incremented if encoding/decoding resulted in + * an error. + */ + int frame_number; + + /** + * number of bytes per packet if constant and known or 0 + * Used by some WAV based audio codecs. + */ + int block_align; + + /** + * Audio cutoff bandwidth (0 means "automatic") + * - encoding: Set by user. + * - decoding: unused + */ + int cutoff; + + /** + * Audio channel layout. + * - encoding: set by user. + * - decoding: set by user, may be overwritten by libavcodec. + */ + uint64_t channel_layout; + + /** + * Request decoder to use this channel layout if it can (0 for default) + * - encoding: unused + * - decoding: Set by user. + */ + uint64_t request_channel_layout; + + /** + * Type of service that the audio stream conveys. + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + enum AVAudioServiceType audio_service_type; + + /** + * desired sample format + * - encoding: Not used. + * - decoding: Set by user. + * Decoder will decode to this format if it can. + */ + enum AVSampleFormat request_sample_fmt; + + /** + * This callback is called at the beginning of each frame to get data + * buffer(s) for it. 
There may be one contiguous buffer for all the data or + * there may be a buffer per each data plane or anything in between. What + * this means is, you may set however many entries in buf[] you feel necessary. + * Each buffer must be reference-counted using the AVBuffer API (see description + * of buf[] below). + * + * The following fields will be set in the frame before this callback is + * called: + * - format + * - width, height (video only) + * - sample_rate, channel_layout, nb_samples (audio only) + * Their values may differ from the corresponding values in + * AVCodecContext. This callback must use the frame values, not the codec + * context values, to calculate the required buffer size. + * + * This callback must fill the following fields in the frame: + * - data[] + * - linesize[] + * - extended_data: + * * if the data is planar audio with more than 8 channels, then this + * callback must allocate and fill extended_data to contain all pointers + * to all data planes. data[] must hold as many pointers as it can. + * extended_data must be allocated with av_malloc() and will be freed in + * av_frame_unref(). + * * otherwise extended_data must point to data + * - buf[] must contain one or more pointers to AVBufferRef structures. Each of + * the frame's data and extended_data pointers must be contained in these. That + * is, one AVBufferRef for each allocated chunk of memory, not necessarily one + * AVBufferRef per data[] entry. See: av_buffer_create(), av_buffer_alloc(), + * and av_buffer_ref(). + * - extended_buf and nb_extended_buf must be allocated with av_malloc() by + * this callback and filled with the extra buffers if there are more + * buffers than buf[] can hold. extended_buf will be freed in + * av_frame_unref(). + * + * If AV_CODEC_CAP_DR1 is not set then get_buffer2() must call + * avcodec_default_get_buffer2() instead of providing buffers allocated by + * some other means. 
+ * + * Each data plane must be aligned to the maximum required by the target + * CPU. + * + * @see avcodec_default_get_buffer2() + * + * Video: + * + * If AV_GET_BUFFER_FLAG_REF is set in flags then the frame may be reused + * (read and/or written to if it is writable) later by libavcodec. + * + * avcodec_align_dimensions2() should be used to find the required width and + * height, as they normally need to be rounded up to the next multiple of 16. + * + * Some decoders do not support linesizes changing between frames. + * + * If frame multithreading is used and thread_safe_callbacks is set, + * this callback may be called from a different thread, but not from more + * than one at once. Does not need to be reentrant. + * + * @see avcodec_align_dimensions2() + * + * Audio: + * + * Decoders request a buffer of a particular size by setting + * AVFrame.nb_samples prior to calling get_buffer2(). The decoder may, + * however, utilize only part of the buffer by setting AVFrame.nb_samples + * to a smaller value in the output frame. + * + * As a convenience, av_samples_get_buffer_size() and + * av_samples_fill_arrays() in libavutil may be used by custom get_buffer2() + * functions to find the required data size and to fill data pointers and + * linesize. In AVFrame.linesize, only linesize[0] may be set for audio + * since all planes must be the same size. + * + * @see av_samples_get_buffer_size(), av_samples_fill_arrays() + * + * - encoding: unused + * - decoding: Set by libavcodec, user can override. + */ + int (*get_buffer2)(struct AVCodecContext *s, AVFrame *frame, int flags); + + /** + * If non-zero, the decoded audio and video frames returned from + * avcodec_decode_video2() and avcodec_decode_audio4() are reference-counted + * and are valid indefinitely. The caller must free them with + * av_frame_unref() when they are not needed anymore. + * Otherwise, the decoded frames must not be freed by the caller and are + * only valid until the next decode call. 
+ * + * This is always automatically enabled if avcodec_receive_frame() is used. + * + * - encoding: unused + * - decoding: set by the caller before avcodec_open2(). + */ + int refcounted_frames; + + /* - encoding parameters */ + float qcompress; ///< amount of qscale change between easy & hard scenes (0.0-1.0) + float qblur; ///< amount of qscale smoothing over time (0.0-1.0) + + /** + * minimum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmin; + + /** + * maximum quantizer + * - encoding: Set by user. + * - decoding: unused + */ + int qmax; + + /** + * maximum quantizer difference between frames + * - encoding: Set by user. + * - decoding: unused + */ + int max_qdiff; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + float rc_qsquish; + + attribute_deprecated + float rc_qmod_amp; + attribute_deprecated + int rc_qmod_freq; +#endif + + /** + * decoder bitstream buffer size + * - encoding: Set by user. + * - decoding: unused + */ + int rc_buffer_size; + + /** + * ratecontrol override, see RcOverride + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + int rc_override_count; + RcOverride *rc_override; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + const char *rc_eq; +#endif + + /** + * maximum bitrate + * - encoding: Set by user. + * - decoding: Set by user, may be overwritten by libavcodec. + */ + int64_t rc_max_rate; + + /** + * minimum bitrate + * - encoding: Set by user. + * - decoding: unused + */ + int64_t rc_min_rate; + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + float rc_buffer_aggressivity; + + attribute_deprecated + float rc_initial_cplx; +#endif + + /** + * Ratecontrol attempt to use, at maximum, of what can be used without an underflow. + * - encoding: Set by user. + * - decoding: unused. 
+ */ + float rc_max_available_vbv_use; + + /** + * Ratecontrol attempt to use, at least, times the amount needed to prevent a vbv overflow. + * - encoding: Set by user. + * - decoding: unused. + */ + float rc_min_vbv_overflow_use; + + /** + * Number of bits which should be loaded into the rc buffer before decoding starts. + * - encoding: Set by user. + * - decoding: unused + */ + int rc_initial_buffer_occupancy; + +#if FF_API_CODER_TYPE +#define FF_CODER_TYPE_VLC 0 +#define FF_CODER_TYPE_AC 1 +#define FF_CODER_TYPE_RAW 2 +#define FF_CODER_TYPE_RLE 3 +#if FF_API_UNUSED_MEMBERS +#define FF_CODER_TYPE_DEFLATE 4 +#endif /* FF_API_UNUSED_MEMBERS */ + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int coder_type; +#endif /* FF_API_CODER_TYPE */ + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int context_model; +#endif + +#if FF_API_MPV_OPT + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int lmin; + + /** + * @deprecated use encoder private options instead + */ + attribute_deprecated + int lmax; +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_threshold; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_factor; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_exp; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int frame_skip_cmp; +#endif /* FF_API_PRIVATE_OPT */ + + /** + * trellis RD quantization + * - encoding: Set by user. 
+ * - decoding: unused + */ + int trellis; + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int min_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int max_prediction_order; + + /** @deprecated use encoder private options instead */ + attribute_deprecated + int64_t timecode_frame_start; +#endif + +#if FF_API_RTP_CALLBACK + /** + * @deprecated unused + */ + /* The RTP callback: This function is called */ + /* every time the encoder has a packet to send. */ + /* It depends on the encoder if the data starts */ + /* with a Start Code (it should). H.263 does. */ + /* mb_nb contains the number of macroblocks */ + /* encoded in the RTP payload. */ + attribute_deprecated + void (*rtp_callback)(struct AVCodecContext *avctx, void *data, int size, int mb_nb); +#endif + +#if FF_API_PRIVATE_OPT + /** @deprecated use encoder private options instead */ + attribute_deprecated + int rtp_payload_size; /* The size of the RTP payload: the coder will */ + /* do its best to deliver a chunk with size */ + /* below rtp_payload_size, the chunk will start */ + /* with a start code on some codecs like H.263. */ + /* This doesn't take account of any particular */ + /* headers inside the transmitted RTP payload. */ +#endif + +#if FF_API_STAT_BITS + /* statistics, used for 2-pass encoding */ + attribute_deprecated + int mv_bits; + attribute_deprecated + int header_bits; + attribute_deprecated + int i_tex_bits; + attribute_deprecated + int p_tex_bits; + attribute_deprecated + int i_count; + attribute_deprecated + int p_count; + attribute_deprecated + int skip_count; + attribute_deprecated + int misc_bits; + + /** @deprecated this field is unused */ + attribute_deprecated + int frame_bits; +#endif + + /** + * pass1 encoding statistics output buffer + * - encoding: Set by libavcodec. 
+ * - decoding: unused + */ + char *stats_out; + + /** + * pass2 encoding statistics input buffer + * Concatenated stuff from stats_out of pass1 should be placed here. + * - encoding: Allocated/set/freed by user. + * - decoding: unused + */ + char *stats_in; + + /** + * Work around bugs in encoders which sometimes cannot be detected automatically. + * - encoding: Set by user + * - decoding: Set by user + */ + int workaround_bugs; +#define FF_BUG_AUTODETECT 1 ///< autodetection +#if FF_API_OLD_MSMPEG4 +#define FF_BUG_OLD_MSMPEG4 2 +#endif +#define FF_BUG_XVID_ILACE 4 +#define FF_BUG_UMP4 8 +#define FF_BUG_NO_PADDING 16 +#define FF_BUG_AMV 32 +#if FF_API_AC_VLC +#define FF_BUG_AC_VLC 0 ///< Will be removed, libavcodec can now handle these non-compliant files by default. +#endif +#define FF_BUG_QPEL_CHROMA 64 +#define FF_BUG_STD_QPEL 128 +#define FF_BUG_QPEL_CHROMA2 256 +#define FF_BUG_DIRECT_BLOCKSIZE 512 +#define FF_BUG_EDGE 1024 +#define FF_BUG_HPEL_CHROMA 2048 +#define FF_BUG_DC_CLIP 4096 +#define FF_BUG_MS 8192 ///< Work around various bugs in Microsoft's broken decoders. +#define FF_BUG_TRUNCATED 16384 + + /** + * strictly follow the standard (MPEG-4, ...). + * - encoding: Set by user. + * - decoding: Set by user. + * Setting this to STRICT or higher means the encoder and decoder will + * generally do stupid things, whereas setting it to unofficial or lower + * will mean the encoder might produce output that is not supported by all + * spec-compliant decoders. Decoders don't differentiate between normal, + * unofficial and experimental (that is, they always try to decode things + * when they can) unless they are explicitly asked to behave stupidly + * (=strictly conform to the specs) + */ + int strict_std_compliance; +#define FF_COMPLIANCE_VERY_STRICT 2 ///< Strictly conform to an older more strict version of the spec or reference software. +#define FF_COMPLIANCE_STRICT 1 ///< Strictly conform to all the things in the spec no matter what consequences. 
+#define FF_COMPLIANCE_NORMAL 0 +#define FF_COMPLIANCE_UNOFFICIAL -1 ///< Allow unofficial extensions +#define FF_COMPLIANCE_EXPERIMENTAL -2 ///< Allow nonstandardized experimental things. + + /** + * error concealment flags + * - encoding: unused + * - decoding: Set by user. + */ + int error_concealment; +#define FF_EC_GUESS_MVS 1 +#define FF_EC_DEBLOCK 2 +#define FF_EC_FAVOR_INTER 256 + + /** + * debug + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug; +#define FF_DEBUG_PICT_INFO 1 +#define FF_DEBUG_RC 2 +#define FF_DEBUG_BITSTREAM 4 +#define FF_DEBUG_MB_TYPE 8 +#define FF_DEBUG_QP 16 +#if FF_API_DEBUG_MV +/** + * @deprecated this option does nothing + */ +#define FF_DEBUG_MV 32 +#endif +#define FF_DEBUG_DCT_COEFF 0x00000040 +#define FF_DEBUG_SKIP 0x00000080 +#define FF_DEBUG_STARTCODE 0x00000100 +#if FF_API_UNUSED_MEMBERS +#define FF_DEBUG_PTS 0x00000200 +#endif /* FF_API_UNUSED_MEMBERS */ +#define FF_DEBUG_ER 0x00000400 +#define FF_DEBUG_MMCO 0x00000800 +#define FF_DEBUG_BUGS 0x00001000 +#if FF_API_DEBUG_MV +#define FF_DEBUG_VIS_QP 0x00002000 ///< only access through AVOptions from outside libavcodec +#define FF_DEBUG_VIS_MB_TYPE 0x00004000 ///< only access through AVOptions from outside libavcodec +#endif +#define FF_DEBUG_BUFFERS 0x00008000 +#define FF_DEBUG_THREADS 0x00010000 +#define FF_DEBUG_GREEN_MD 0x00800000 +#define FF_DEBUG_NOMC 0x01000000 + +#if FF_API_DEBUG_MV + /** + * debug + * Code outside libavcodec should access this field using AVOptions + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 // visualize forward predicted MVs of P-frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 // visualize forward predicted MVs of B-frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 // visualize backward predicted MVs of B-frames +#endif + + /** + * Error recognition; may misdetect some more or less valid parts as errors. 
+ * - encoding: unused + * - decoding: Set by user. + */ + int err_recognition; + +/** + * Verify checksums embedded in the bitstream (could be of either encoded or + * decoded data, depending on the codec) and print an error message on mismatch. + * If AV_EF_EXPLODE is also set, a mismatching checksum will result in the + * decoder returning an error. + */ +#define AV_EF_CRCCHECK (1<<0) +#define AV_EF_BITSTREAM (1<<1) ///< detect bitstream specification deviations +#define AV_EF_BUFFER (1<<2) ///< detect improper bitstream length +#define AV_EF_EXPLODE (1<<3) ///< abort decoding on minor error detection + +#define AV_EF_IGNORE_ERR (1<<15) ///< ignore errors and continue +#define AV_EF_CAREFUL (1<<16) ///< consider things that violate the spec, are fast to calculate and have not been seen in the wild as errors +#define AV_EF_COMPLIANT (1<<17) ///< consider all spec non compliances as errors +#define AV_EF_AGGRESSIVE (1<<18) ///< consider things that a sane encoder should not do as an error + + + /** + * opaque 64-bit number (generally a PTS) that will be reordered and + * output in AVFrame.reordered_opaque + * - encoding: unused + * - decoding: Set by user. + */ + int64_t reordered_opaque; + + /** + * Hardware accelerator in use + * - encoding: unused. + * - decoding: Set by libavcodec + */ + struct AVHWAccel *hwaccel; + + /** + * Hardware accelerator context. + * For some hardware accelerators, a global context needs to be + * provided by the user. In that case, this holds display-dependent + * data FFmpeg cannot instantiate itself. Please refer to the + * FFmpeg HW accelerator documentation to know how to fill this + * is. e.g. for VA API, this is a struct vaapi_context. + * - encoding: unused + * - decoding: Set by user + */ + void *hwaccel_context; + + /** + * error + * - encoding: Set by libavcodec if flags & AV_CODEC_FLAG_PSNR. 
+ * - decoding: unused + */ + uint64_t error[AV_NUM_DATA_POINTERS]; + + /** + * DCT algorithm, see FF_DCT_* below + * - encoding: Set by user. + * - decoding: unused + */ + int dct_algo; +#define FF_DCT_AUTO 0 +#define FF_DCT_FASTINT 1 +#define FF_DCT_INT 2 +#define FF_DCT_MMX 3 +#define FF_DCT_ALTIVEC 5 +#define FF_DCT_FAAN 6 + + /** + * IDCT algorithm, see FF_IDCT_* below. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int idct_algo; +#define FF_IDCT_AUTO 0 +#define FF_IDCT_INT 1 +#define FF_IDCT_SIMPLE 2 +#define FF_IDCT_SIMPLEMMX 3 +#define FF_IDCT_ARM 7 +#define FF_IDCT_ALTIVEC 8 +#if FF_API_ARCH_SH4 +#define FF_IDCT_SH4 9 +#endif +#define FF_IDCT_SIMPLEARM 10 +#if FF_API_UNUSED_MEMBERS +#define FF_IDCT_IPP 13 +#endif /* FF_API_UNUSED_MEMBERS */ +#define FF_IDCT_XVID 14 +#if FF_API_IDCT_XVIDMMX +#define FF_IDCT_XVIDMMX 14 +#endif /* FF_API_IDCT_XVIDMMX */ +#define FF_IDCT_SIMPLEARMV5TE 16 +#define FF_IDCT_SIMPLEARMV6 17 +#if FF_API_ARCH_SPARC +#define FF_IDCT_SIMPLEVIS 18 +#endif +#define FF_IDCT_FAAN 20 +#define FF_IDCT_SIMPLENEON 22 +#if FF_API_ARCH_ALPHA +#define FF_IDCT_SIMPLEALPHA 23 +#endif +#define FF_IDCT_SIMPLEAUTO 128 + + /** + * bits per sample/pixel from the demuxer (needed for huffyuv). + * - encoding: Set by libavcodec. + * - decoding: Set by user. + */ + int bits_per_coded_sample; + + /** + * Bits per sample/pixel of internal libavcodec pixel/sample format. + * - encoding: set by user. + * - decoding: set by libavcodec. + */ + int bits_per_raw_sample; + +#if FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + +#if FF_API_CODED_FRAME + /** + * the picture in the bitstream + * - encoding: Set by libavcodec. 
+ * - decoding: unused + * + * @deprecated use the quality factor packet side data instead + */ + attribute_deprecated AVFrame *coded_frame; +#endif + + /** + * thread count + * is used to decide how many independent tasks should be passed to execute() + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_count; + + /** + * Which multithreading methods to use. + * Use of FF_THREAD_FRAME will increase decoding delay by one frame per thread, + * so clients which cannot provide future frames should not use it. + * + * - encoding: Set by user, otherwise the default is used. + * - decoding: Set by user, otherwise the default is used. + */ + int thread_type; +#define FF_THREAD_FRAME 1 ///< Decode more than one frame at once +#define FF_THREAD_SLICE 2 ///< Decode more than one part of a single frame at once + + /** + * Which multithreading methods are in use by the codec. + * - encoding: Set by libavcodec. + * - decoding: Set by libavcodec. + */ + int active_thread_type; + + /** + * Set by the client if its custom get_buffer() callback can be called + * synchronously from another thread, which allows faster multithreaded decoding. + * draw_horiz_band() will be called from other threads regardless of this setting. + * Ignored if the default get_buffer() is used. + * - encoding: Set by user. + * - decoding: Set by user. + */ + int thread_safe_callbacks; + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * @param count the number of things to execute + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. 
+ */ + int (*execute)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg), void *arg2, int *ret, int count, int size); + + /** + * The codec may call this to execute several independent things. + * It will return only after finishing all tasks. + * The user may replace this with some multithreaded implementation, + * the default implementation will execute the parts serially. + * Also see avcodec_thread_init and e.g. the --enable-pthread configure option. + * @param c context passed also to func + * @param count the number of things to execute + * @param arg2 argument passed unchanged to func + * @param ret return values of executed functions, must have space for "count" values. May be NULL. + * @param func function that will be called count times, with jobnr from 0 to count-1. + * threadnr will be in the range 0 to c->thread_count-1 < MAX_THREADS and so that no + * two instances of func executing at the same time will have the same threadnr. + * @return always 0 currently, but code should handle a future improvement where when any call to func + * returns < 0 no further calls to func may be done and < 0 is returned. + * - encoding: Set by libavcodec, user can override. + * - decoding: Set by libavcodec, user can override. + */ + int (*execute2)(struct AVCodecContext *c, int (*func)(struct AVCodecContext *c2, void *arg, int jobnr, int threadnr), void *arg2, int *ret, int count); + + /** + * noise vs. sse weight for the nsse comparison function + * - encoding: Set by user. + * - decoding: unused + */ + int nsse_weight; + + /** + * profile + * - encoding: Set by user. + * - decoding: Set by libavcodec. 
+ */ + int profile; +#define FF_PROFILE_UNKNOWN -99 +#define FF_PROFILE_RESERVED -100 + +#define FF_PROFILE_AAC_MAIN 0 +#define FF_PROFILE_AAC_LOW 1 +#define FF_PROFILE_AAC_SSR 2 +#define FF_PROFILE_AAC_LTP 3 +#define FF_PROFILE_AAC_HE 4 +#define FF_PROFILE_AAC_HE_V2 28 +#define FF_PROFILE_AAC_LD 22 +#define FF_PROFILE_AAC_ELD 38 +#define FF_PROFILE_MPEG2_AAC_LOW 128 +#define FF_PROFILE_MPEG2_AAC_HE 131 + +#define FF_PROFILE_DTS 20 +#define FF_PROFILE_DTS_ES 30 +#define FF_PROFILE_DTS_96_24 40 +#define FF_PROFILE_DTS_HD_HRA 50 +#define FF_PROFILE_DTS_HD_MA 60 +#define FF_PROFILE_DTS_EXPRESS 70 + +#define FF_PROFILE_MPEG2_422 0 +#define FF_PROFILE_MPEG2_HIGH 1 +#define FF_PROFILE_MPEG2_SS 2 +#define FF_PROFILE_MPEG2_SNR_SCALABLE 3 +#define FF_PROFILE_MPEG2_MAIN 4 +#define FF_PROFILE_MPEG2_SIMPLE 5 + +#define FF_PROFILE_H264_CONSTRAINED (1<<9) // 8+1; constraint_set1_flag +#define FF_PROFILE_H264_INTRA (1<<11) // 8+3; constraint_set3_flag + +#define FF_PROFILE_H264_BASELINE 66 +#define FF_PROFILE_H264_CONSTRAINED_BASELINE (66|FF_PROFILE_H264_CONSTRAINED) +#define FF_PROFILE_H264_MAIN 77 +#define FF_PROFILE_H264_EXTENDED 88 +#define FF_PROFILE_H264_HIGH 100 +#define FF_PROFILE_H264_HIGH_10 110 +#define FF_PROFILE_H264_HIGH_10_INTRA (110|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_422 122 +#define FF_PROFILE_H264_HIGH_422_INTRA (122|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_HIGH_444 144 +#define FF_PROFILE_H264_HIGH_444_PREDICTIVE 244 +#define FF_PROFILE_H264_HIGH_444_INTRA (244|FF_PROFILE_H264_INTRA) +#define FF_PROFILE_H264_CAVLC_444 44 + +#define FF_PROFILE_VC1_SIMPLE 0 +#define FF_PROFILE_VC1_MAIN 1 +#define FF_PROFILE_VC1_COMPLEX 2 +#define FF_PROFILE_VC1_ADVANCED 3 + +#define FF_PROFILE_MPEG4_SIMPLE 0 +#define FF_PROFILE_MPEG4_SIMPLE_SCALABLE 1 +#define FF_PROFILE_MPEG4_CORE 2 +#define FF_PROFILE_MPEG4_MAIN 3 +#define FF_PROFILE_MPEG4_N_BIT 4 +#define FF_PROFILE_MPEG4_SCALABLE_TEXTURE 5 +#define FF_PROFILE_MPEG4_SIMPLE_FACE_ANIMATION 6 +#define 
FF_PROFILE_MPEG4_BASIC_ANIMATED_TEXTURE 7 +#define FF_PROFILE_MPEG4_HYBRID 8 +#define FF_PROFILE_MPEG4_ADVANCED_REAL_TIME 9 +#define FF_PROFILE_MPEG4_CORE_SCALABLE 10 +#define FF_PROFILE_MPEG4_ADVANCED_CODING 11 +#define FF_PROFILE_MPEG4_ADVANCED_CORE 12 +#define FF_PROFILE_MPEG4_ADVANCED_SCALABLE_TEXTURE 13 +#define FF_PROFILE_MPEG4_SIMPLE_STUDIO 14 +#define FF_PROFILE_MPEG4_ADVANCED_SIMPLE 15 + +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_0 1 +#define FF_PROFILE_JPEG2000_CSTREAM_RESTRICTION_1 2 +#define FF_PROFILE_JPEG2000_CSTREAM_NO_RESTRICTION 32768 +#define FF_PROFILE_JPEG2000_DCINEMA_2K 3 +#define FF_PROFILE_JPEG2000_DCINEMA_4K 4 + +#define FF_PROFILE_VP9_0 0 +#define FF_PROFILE_VP9_1 1 +#define FF_PROFILE_VP9_2 2 +#define FF_PROFILE_VP9_3 3 + +#define FF_PROFILE_HEVC_MAIN 1 +#define FF_PROFILE_HEVC_MAIN_10 2 +#define FF_PROFILE_HEVC_MAIN_STILL_PICTURE 3 +#define FF_PROFILE_HEVC_REXT 4 + + /** + * level + * - encoding: Set by user. + * - decoding: Set by libavcodec. + */ + int level; +#define FF_LEVEL_UNKNOWN -99 + + /** + * Skip loop filtering for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_loop_filter; + + /** + * Skip IDCT/dequantization for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_idct; + + /** + * Skip decoding for selected frames. + * - encoding: unused + * - decoding: Set by user. + */ + enum AVDiscard skip_frame; + + /** + * Header containing style information for text subtitles. + * For SUBTITLE_ASS subtitle type, it should contain the whole ASS + * [Script Info] and [V4+ Styles] section, plus the [Events] line and + * the Format line following. It shouldn't include any Dialogue line. 
+ * - encoding: Set/allocated/freed by user (before avcodec_open2()) + * - decoding: Set/allocated/freed by libavcodec (by avcodec_open2()) + */ + uint8_t *subtitle_header; + int subtitle_header_size; + +#if FF_API_ERROR_RATE + /** + * @deprecated use the 'error_rate' private AVOption of the mpegvideo + * encoders + */ + attribute_deprecated + int error_rate; +#endif + +#if FF_API_VBV_DELAY + /** + * VBV delay coded in the last frame (in periods of a 27 MHz clock). + * Used for compliant TS muxing. + * - encoding: Set by libavcodec. + * - decoding: unused. + * @deprecated this value is now exported as a part of + * AV_PKT_DATA_CPB_PROPERTIES packet side data + */ + attribute_deprecated + uint64_t vbv_delay; +#endif + +#if FF_API_SIDEDATA_ONLY_PKT + /** + * Encoding only and set by default. Allow encoders to output packets + * that do not contain any encoded data, only side data. + * + * Some encoders need to output such packets, e.g. to update some stream + * parameters at the end of encoding. + * + * @deprecated this field disables the default behaviour and + * it is kept only for compatibility. + */ + attribute_deprecated + int side_data_only_packets; +#endif + + /** + * Audio only. The number of "priming" samples (padding) inserted by the + * encoder at the beginning of the audio. I.e. this number of leading + * decoded samples must be discarded by the caller to get the original audio + * without leading padding. + * + * - decoding: unused + * - encoding: Set by libavcodec. The timestamps on the output packets are + * adjusted by the encoder so that they always refer to the + * first sample of the data actually contained in the packet, + * including any added padding. E.g. if the timebase is + * 1/samplerate and the timestamp of the first input sample is + * 0, the timestamp of the first output packet will be + * -initial_padding. 
+ */ + int initial_padding; + + /** + * - decoding: For codecs that store a framerate value in the compressed + * bitstream, the decoder may export it here. { 0, 1} when + * unknown. + * - encoding: May be used to signal the framerate of CFR content to an + * encoder. + */ + AVRational framerate; + + /** + * Nominal unaccelerated pixel format, see AV_PIX_FMT_xxx. + * - encoding: unused. + * - decoding: Set by libavcodec before calling get_format() + */ + enum AVPixelFormat sw_pix_fmt; + + /** + * Timebase in which pkt_dts/pts and AVPacket.dts/pts are. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_pkt_timebase(avctx) + * - encoding unused. + * - decoding set by user. + */ + AVRational pkt_timebase; + + /** + * AVCodecDescriptor + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_codec_descriptor(avctx) + * - encoding: unused. + * - decoding: set by libavcodec. + */ + const AVCodecDescriptor *codec_descriptor; + +#if !FF_API_LOWRES + /** + * low resolution decoding, 1-> 1/2 size, 2->1/4 size + * - encoding: unused + * - decoding: Set by user. + * Code outside libavcodec should access this field using: + * av_codec_{get,set}_lowres(avctx) + */ + int lowres; +#endif + + /** + * Current statistics for PTS correction. + * - decoding: maintained and used by libavcodec, not intended to be used by user apps + * - encoding: unused + */ + int64_t pts_correction_num_faulty_pts; /// Number of incorrect PTS values so far + int64_t pts_correction_num_faulty_dts; /// Number of incorrect DTS values so far + int64_t pts_correction_last_pts; /// PTS of the last frame + int64_t pts_correction_last_dts; /// DTS of the last frame + + /** + * Character encoding of the input subtitles file. + * - decoding: set by user + * - encoding: unused + */ + char *sub_charenc; + + /** + * Subtitles character encoding mode. 
Formats or codecs might be adjusting + * this setting (if they are doing the conversion themselves for instance). + * - decoding: set by libavcodec + * - encoding: unused + */ + int sub_charenc_mode; +#define FF_SUB_CHARENC_MODE_DO_NOTHING -1 ///< do nothing (demuxer outputs a stream supposed to be already in UTF-8, or the codec is bitmap for instance) +#define FF_SUB_CHARENC_MODE_AUTOMATIC 0 ///< libavcodec will select the mode itself +#define FF_SUB_CHARENC_MODE_PRE_DECODER 1 ///< the AVPacket data needs to be recoded to UTF-8 before being fed to the decoder, requires iconv + + /** + * Skip processing alpha if supported by codec. + * Note that if the format uses pre-multiplied alpha (common with VP6, + * and recommended due to better video quality/compression) + * the image will look as if alpha-blended onto a black background. + * However for formats that do not use pre-multiplied alpha + * there might be serious artefacts (though e.g. libswscale currently + * assumes pre-multiplied alpha anyway). + * Code outside libavcodec should access this field using AVOptions + * + * - decoding: set by user + * - encoding: unused + */ + int skip_alpha; + + /** + * Number of samples to skip after a discontinuity + * - decoding: unused + * - encoding: set by libavcodec + */ + int seek_preroll; + +#if !FF_API_DEBUG_MV + /** + * debug motion vectors + * Code outside libavcodec should access this field using AVOptions + * - encoding: Set by user. + * - decoding: Set by user. + */ + int debug_mv; +#define FF_DEBUG_VIS_MV_P_FOR 0x00000001 //visualize forward predicted MVs of P frames +#define FF_DEBUG_VIS_MV_B_FOR 0x00000002 //visualize forward predicted MVs of B frames +#define FF_DEBUG_VIS_MV_B_BACK 0x00000004 //visualize backward predicted MVs of B frames +#endif + + /** + * custom intra quantization matrix + * Code outside libavcodec should access this field using av_codec_g/set_chroma_intra_matrix() + * - encoding: Set by user, can be NULL. + * - decoding: unused. 
+ */
+ uint16_t *chroma_intra_matrix;
+
+ /**
+ * dump format separator.
+ * can be ", " or "\n " or anything else
+ * Code outside libavcodec should access this field using AVOptions
+ * (NO direct access).
+ * - encoding: Set by user.
+ * - decoding: Set by user.
+ */
+ uint8_t *dump_separator;
+
+ /**
+ * ',' separated list of allowed decoders.
+ * If NULL then all are allowed
+ * - encoding: unused
+ * - decoding: set by user through AVOptions (NO direct access)
+ */
+ char *codec_whitelist;
+
+ /*
+ * Properties of the stream that gets decoded
+ * To be accessed through av_codec_get_properties() (NO direct access)
+ * - encoding: unused
+ * - decoding: set by libavcodec
+ */
+ unsigned properties;
+#define FF_CODEC_PROPERTY_LOSSLESS 0x00000001
+#define FF_CODEC_PROPERTY_CLOSED_CAPTIONS 0x00000002
+
+ /**
+ * Additional data associated with the entire coded stream.
+ *
+ * - decoding: unused
+ * - encoding: may be set by libavcodec after avcodec_open2().
+ */
+ AVPacketSideData *coded_side_data;
+ int nb_coded_side_data;
+
+ /**
+ * Encoding only.
+ *
+ * For hardware encoders configured to use a hwaccel pixel format, this
+ * field should be set by the caller to a reference to the AVHWFramesContext
+ * describing input frames. AVHWFramesContext.format must be equal to
+ * AVCodecContext.pix_fmt.
+ *
+ * This field should be set before avcodec_open2() is called and is
+ * afterwards owned and managed by libavcodec.
+ */ + AVBufferRef *hw_frames_ctx; + + /** + * Control the form of AVSubtitle.rects[N]->ass + * - decoding: set by user + * - encoding: unused + */ + int sub_text_format; +#define FF_SUB_TEXT_FMT_ASS 0 +#if FF_API_ASS_TIMING +#define FF_SUB_TEXT_FMT_ASS_WITH_TIMINGS 1 +#endif + +} AVCodecContext; + +AVRational av_codec_get_pkt_timebase (const AVCodecContext *avctx); +void av_codec_set_pkt_timebase (AVCodecContext *avctx, AVRational val); + +const AVCodecDescriptor *av_codec_get_codec_descriptor(const AVCodecContext *avctx); +void av_codec_set_codec_descriptor(AVCodecContext *avctx, const AVCodecDescriptor *desc); + +unsigned av_codec_get_codec_properties(const AVCodecContext *avctx); + +int av_codec_get_lowres(const AVCodecContext *avctx); +void av_codec_set_lowres(AVCodecContext *avctx, int val); + +int av_codec_get_seek_preroll(const AVCodecContext *avctx); +void av_codec_set_seek_preroll(AVCodecContext *avctx, int val); + +uint16_t *av_codec_get_chroma_intra_matrix(const AVCodecContext *avctx); +void av_codec_set_chroma_intra_matrix(AVCodecContext *avctx, uint16_t *val); + +/** + * AVProfile. + */ +typedef struct AVProfile { + int profile; + const char *name; ///< short name for the profile +} AVProfile; + +typedef struct AVCodecDefault AVCodecDefault; + +struct AVSubtitle; + +/** + * AVCodec. + */ +typedef struct AVCodec { + /** + * Name of the codec implementation. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + * This is the primary way to find a codec from the user perspective. + */ + const char *name; + /** + * Descriptive name for the codec, meant to be more human readable than name. + * You should use the NULL_IF_CONFIG_SMALL() macro to define it. + */ + const char *long_name; + enum AVMediaType type; + enum AVCodecID id; + /** + * Codec capabilities. 
+ * see AV_CODEC_CAP_*
+ */
+ int capabilities;
+ const AVRational *supported_framerates; ///< array of supported framerates, or NULL if any, array is terminated by {0,0}
+ const enum AVPixelFormat *pix_fmts; ///< array of supported pixel formats, or NULL if unknown, array is terminated by -1
+ const int *supported_samplerates; ///< array of supported audio samplerates, or NULL if unknown, array is terminated by 0
+ const enum AVSampleFormat *sample_fmts; ///< array of supported sample formats, or NULL if unknown, array is terminated by -1
+ const uint64_t *channel_layouts; ///< array of supported channel layouts, or NULL if unknown. array is terminated by 0
+ uint8_t max_lowres; ///< maximum value for lowres supported by the decoder, no direct access, use av_codec_get_max_lowres()
+ const AVClass *priv_class; ///< AVClass for the private context
+ const AVProfile *profiles; ///< array of recognized profiles, or NULL if unknown, array is terminated by {FF_PROFILE_UNKNOWN}
+
+ /*****************************************************************
+ * No fields below this line are part of the public API. They
+ * may not be used outside of libavcodec and can be changed and
+ * removed at will.
+ * New public fields should be added right above.
+ *****************************************************************
+ */
+ int priv_data_size;
+ struct AVCodec *next;
+ /**
+ * @name Frame-level threading support functions
+ * @{
+ */
+ /**
+ * If defined, called on thread contexts when they are created.
+ * If the codec allocates writable tables in init(), re-allocate them here.
+ * priv_data will be set to a copy of the original.
+ */
+ int (*init_thread_copy)(AVCodecContext *);
+ /**
+ * Copy necessary context variables from a previous thread context to the current one.
+ * If not defined, the next thread will start automatically; otherwise, the codec
+ * must call ff_thread_finish_setup().
+ * + * dst and src will (rarely) point to the same context, in which case memcpy should be skipped. + */ + int (*update_thread_context)(AVCodecContext *dst, const AVCodecContext *src); + /** @} */ + + /** + * Private codec-specific defaults. + */ + const AVCodecDefault *defaults; + + /** + * Initialize codec static data, called from avcodec_register(). + */ + void (*init_static_data)(struct AVCodec *codec); + + int (*init)(AVCodecContext *); + int (*encode_sub)(AVCodecContext *, uint8_t *buf, int buf_size, + const struct AVSubtitle *sub); + /** + * Encode data to an AVPacket. + * + * @param avctx codec context + * @param avpkt output AVPacket (may contain a user-provided buffer) + * @param[in] frame AVFrame containing the raw data to be encoded + * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a + * non-empty packet was returned in avpkt. + * @return 0 on success, negative error code on failure + */ + int (*encode2)(AVCodecContext *avctx, AVPacket *avpkt, const AVFrame *frame, + int *got_packet_ptr); + int (*decode)(AVCodecContext *, void *outdata, int *outdata_size, AVPacket *avpkt); + int (*close)(AVCodecContext *); + /** + * Decode/encode API with decoupled packet/frame dataflow. The API is the + * same as the avcodec_ prefixed APIs (avcodec_send_frame() etc.), except + * that: + * - never called if the codec is closed or the wrong type, + * - AVPacket parameter change side data is applied right before calling + * AVCodec->send_packet, + * - if AV_CODEC_CAP_DELAY is not set, drain packets or frames are never sent, + * - only one drain packet is ever passed down (until the next flush()), + * - a drain AVPacket is always NULL (no need to check for avpkt->size). 
+ */ + int (*send_frame)(AVCodecContext *avctx, const AVFrame *frame); + int (*send_packet)(AVCodecContext *avctx, const AVPacket *avpkt); + int (*receive_frame)(AVCodecContext *avctx, AVFrame *frame); + int (*receive_packet)(AVCodecContext *avctx, AVPacket *avpkt); + /** + * Flush buffers. + * Will be called when seeking + */ + void (*flush)(AVCodecContext *); + /** + * Internal codec capabilities. + * See FF_CODEC_CAP_* in internal.h + */ + int caps_internal; +} AVCodec; + +int av_codec_get_max_lowres(const AVCodec *codec); + +struct MpegEncContext; + +/** + * @defgroup lavc_hwaccel AVHWAccel + * @{ + */ +typedef struct AVHWAccel { + /** + * Name of the hardware accelerated codec. + * The name is globally unique among encoders and among decoders (but an + * encoder and a decoder can share the same name). + */ + const char *name; + + /** + * Type of codec implemented by the hardware accelerator. + * + * See AVMEDIA_TYPE_xxx + */ + enum AVMediaType type; + + /** + * Codec implemented by the hardware accelerator. + * + * See AV_CODEC_ID_xxx + */ + enum AVCodecID id; + + /** + * Supported pixel format. + * + * Only hardware accelerated formats are supported here. + */ + enum AVPixelFormat pix_fmt; + + /** + * Hardware accelerated codec capabilities. + * see HWACCEL_CODEC_CAP_* + */ + int capabilities; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVHWAccel *next; + + /** + * Allocate a custom buffer + */ + int (*alloc_frame)(AVCodecContext *avctx, AVFrame *frame); + + /** + * Called at the beginning of each frame or field picture. + * + * Meaningful frame information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. 
+ * + * Note that buf can be NULL along with buf_size set to 0. + * Otherwise, this means the whole frame is available at this point. + * + * @param avctx the codec context + * @param buf the frame data buffer base + * @param buf_size the size of the frame in bytes + * @return zero if successful, a negative value otherwise + */ + int (*start_frame)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Callback for each slice. + * + * Meaningful slice information (codec specific) is guaranteed to + * be parsed at this point. This function is mandatory. + * The only exception is XvMC, that works on MB level. + * + * @param avctx the codec context + * @param buf the slice data buffer base + * @param buf_size the size of the slice in bytes + * @return zero if successful, a negative value otherwise + */ + int (*decode_slice)(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size); + + /** + * Called at the end of each frame or field picture. + * + * The whole picture is parsed at this point and can now be sent + * to the hardware accelerator. This function is mandatory. + * + * @param avctx the codec context + * @return zero if successful, a negative value otherwise + */ + int (*end_frame)(AVCodecContext *avctx); + + /** + * Size of per-frame hardware accelerator private data. + * + * Private data is allocated with av_mallocz() before + * AVCodecContext.get_buffer() and deallocated after + * AVCodecContext.release_buffer(). + */ + int frame_priv_data_size; + + /** + * Called for every Macroblock in a slice. + * + * XvMC uses it to replace the ff_mpv_decode_mb(). + * Instead of decoding to raw picture, MB parameters are + * stored in an array provided by the video driver. + * + * @param s the mpeg context + */ + void (*decode_mb)(struct MpegEncContext *s); + + /** + * Initialize the hwaccel private data. 
+ * + * This will be called from ff_get_format(), after hwaccel and + * hwaccel_context are set and the hwaccel private data in AVCodecInternal + * is allocated. + */ + int (*init)(AVCodecContext *avctx); + + /** + * Uninitialize the hwaccel private data. + * + * This will be called from get_format() or avcodec_close(), after hwaccel + * and hwaccel_context are already uninitialized. + */ + int (*uninit)(AVCodecContext *avctx); + + /** + * Size of the private data to allocate in + * AVCodecInternal.hwaccel_priv_data. + */ + int priv_data_size; +} AVHWAccel; + +/** + * Hardware acceleration should be used for decoding even if the codec level + * used is unknown or higher than the maximum supported level reported by the + * hardware driver. + * + * It's generally a good idea to pass this flag unless you have a specific + * reason not to, as hardware tends to under-report supported levels. + */ +#define AV_HWACCEL_FLAG_IGNORE_LEVEL (1 << 0) + +/** + * Hardware acceleration can output YUV pixel formats with a different chroma + * sampling than 4:2:0 and/or other than 8 bits per component. + */ +#define AV_HWACCEL_FLAG_ALLOW_HIGH_DEPTH (1 << 1) + +/** + * @} + */ + +#if FF_API_AVPICTURE +/** + * @defgroup lavc_picture AVPicture + * + * Functions for working with AVPicture + * @{ + */ + +/** + * Picture data structure. + * + * Up to four components can be stored into it, the last component is + * alpha. + * @deprecated use AVFrame or imgutils functions instead + */ +typedef struct AVPicture { + attribute_deprecated + uint8_t *data[AV_NUM_DATA_POINTERS]; ///< pointers to the image data planes + attribute_deprecated + int linesize[AV_NUM_DATA_POINTERS]; ///< number of bytes per line +} AVPicture; + +/** + * @} + */ +#endif + +enum AVSubtitleType { + SUBTITLE_NONE, + + SUBTITLE_BITMAP, ///< A bitmap, pict will be set + + /** + * Plain text, the text field must be set by the decoder and is + * authoritative. ass and pict fields may contain approximations. 
+ */ + SUBTITLE_TEXT, + + /** + * Formatted text, the ass field must be set by the decoder and is + * authoritative. pict and text fields may contain approximations. + */ + SUBTITLE_ASS, +}; + +#define AV_SUBTITLE_FLAG_FORCED 0x00000001 + +typedef struct AVSubtitleRect { + int x; ///< top left corner of pict, undefined when pict is not set + int y; ///< top left corner of pict, undefined when pict is not set + int w; ///< width of pict, undefined when pict is not set + int h; ///< height of pict, undefined when pict is not set + int nb_colors; ///< number of colors in pict, undefined when pict is not set + +#if FF_API_AVPICTURE + /** + * @deprecated unused + */ + attribute_deprecated + AVPicture pict; +#endif + /** + * data+linesize for the bitmap of this subtitle. + * Can be set for text/ass as well once they are rendered. + */ + uint8_t *data[4]; + int linesize[4]; + + enum AVSubtitleType type; + + char *text; ///< 0 terminated plain UTF-8 text + + /** + * 0 terminated ASS/SSA compatible event line. + * The presentation of this is unaffected by the other values in this + * struct. + */ + char *ass; + + int flags; +} AVSubtitleRect; + +typedef struct AVSubtitle { + uint16_t format; /* 0 = graphics */ + uint32_t start_display_time; /* relative to packet pts, in ms */ + uint32_t end_display_time; /* relative to packet pts, in ms */ + unsigned num_rects; + AVSubtitleRect **rects; + int64_t pts; ///< Same as packet pts, in AV_TIME_BASE +} AVSubtitle; + +/** + * This struct describes the properties of an encoded stream. + * + * sizeof(AVCodecParameters) is not a part of the public ABI, this struct must + * be allocated with avcodec_parameters_alloc() and freed with + * avcodec_parameters_free(). + */ +typedef struct AVCodecParameters { + /** + * General type of the encoded data. + */ + enum AVMediaType codec_type; + /** + * Specific type of the encoded data (the codec used). 
+ */
+ enum AVCodecID codec_id;
+ /**
+ * Additional information about the codec (corresponds to the AVI FOURCC).
+ */
+ uint32_t codec_tag;
+
+ /**
+ * Extra binary data needed for initializing the decoder, codec-dependent.
+ *
+ * Must be allocated with av_malloc() and will be freed by
+ * avcodec_parameters_free(). The allocated size of extradata must be at
+ * least extradata_size + AV_INPUT_BUFFER_PADDING_SIZE, with the padding
+ * bytes zeroed.
+ */
+ uint8_t *extradata;
+ /**
+ * Size of the extradata content in bytes.
+ */
+ int extradata_size;
+
+ /**
+ * - video: the pixel format, the value corresponds to enum AVPixelFormat.
+ * - audio: the sample format, the value corresponds to enum AVSampleFormat.
+ */
+ int format;
+
+ /**
+ * The average bitrate of the encoded data (in bits per second).
+ */
+ int64_t bit_rate;
+
+ /**
+ * The number of bits per sample in the codedwords.
+ *
+ * This is basically the bitrate per sample. It is mandatory for a bunch of
+ * formats to actually decode them. It's the number of bits for one sample in
+ * the actual coded bitstream.
+ *
+ * This could be for example 4 for ADPCM
+ * For PCM formats this matches bits_per_raw_sample
+ * Can be 0
+ */
+ int bits_per_coded_sample;
+
+ /**
+ * This is the number of valid bits in each output sample. If the
+ * sample format has more bits, the least significant bits are additional
+ * padding bits, which are always 0. Use right shifts to reduce the sample
+ * to its actual size. For example, audio formats with 24 bit samples will
+ * have bits_per_raw_sample set to 24, and format set to AV_SAMPLE_FMT_S32.
+ * To get the original sample use "(int32_t)sample >> 8".
+ *
+ * For ADPCM this might be 12 or 16 or similar
+ * Can be 0
+ */
+ int bits_per_raw_sample;
+
+ /**
+ * Codec-specific bitstream restrictions that the stream conforms to.
+ */
+ int profile;
+ int level;
+
+ /**
+ * Video only. The dimensions of the video frame in pixels.
+ */ + int width; + int height; + + /** + * Video only. The aspect ratio (width / height) which a single pixel + * should have when displayed. + * + * When the aspect ratio is unknown / undefined, the numerator should be + * set to 0 (the denominator may have any value). + */ + AVRational sample_aspect_ratio; + + /** + * Video only. The order of the fields in interlaced video. + */ + enum AVFieldOrder field_order; + + /** + * Video only. Additional colorspace characteristics. + */ + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace color_space; + enum AVChromaLocation chroma_location; + + /** + * Video only. Number of delayed frames. + */ + int video_delay; + + /** + * Audio only. The channel layout bitmask. May be 0 if the channel layout is + * unknown or unspecified, otherwise the number of bits set must be equal to + * the channels field. + */ + uint64_t channel_layout; + /** + * Audio only. The number of audio channels. + */ + int channels; + /** + * Audio only. The number of audio samples per second. + */ + int sample_rate; + /** + * Audio only. The number of bytes per coded audio frame, required by some + * formats. + * + * Corresponds to nBlockAlign in WAVEFORMATEX. + */ + int block_align; + /** + * Audio only. Audio frame size, if known. Required by some formats to be static. + */ + int frame_size; + + /** + * Audio only. The amount of padding (in samples) inserted by the encoder at + * the beginning of the audio. I.e. this number of leading decoded samples + * must be discarded by the caller to get the original audio without leading + * padding. + */ + int initial_padding; + /** + * Audio only. The amount of padding (in samples) appended by the encoder to + * the end of the audio. I.e. this number of decoded samples must be + * discarded by the caller from the end of the stream to get the original + * audio without any trailing padding. 
+ */ + int trailing_padding; + /** + * Audio only. Number of samples to skip after a discontinuity. + */ + int seek_preroll; +} AVCodecParameters; + +/** + * If c is NULL, returns the first registered codec, + * if c is non-NULL, returns the next registered codec after c, + * or NULL if c is the last one. + */ +AVCodec *av_codec_next(const AVCodec *c); + +/** + * Return the LIBAVCODEC_VERSION_INT constant. + */ +unsigned avcodec_version(void); + +/** + * Return the libavcodec build-time configuration. + */ +const char *avcodec_configuration(void); + +/** + * Return the libavcodec license. + */ +const char *avcodec_license(void); + +/** + * Register the codec codec and initialize libavcodec. + * + * @warning either this function or avcodec_register_all() must be called + * before any other libavcodec functions. + * + * @see avcodec_register_all() + */ +void avcodec_register(AVCodec *codec); + +/** + * Register all the codecs, parsers and bitstream filters which were enabled at + * configuration time. If you do not call this function you can select exactly + * which formats you want to support, by using the individual registration + * functions. + * + * @see avcodec_register + * @see av_register_codec_parser + * @see av_register_bitstream_filter + */ +void avcodec_register_all(void); + +/** + * Allocate an AVCodecContext and set its fields to default values. The + * resulting struct should be freed with avcodec_free_context(). + * + * @param codec if non-NULL, allocate private data and initialize defaults + * for the given codec. It is illegal to then call avcodec_open2() + * with a different codec. + * If NULL, then the codec-specific defaults won't be initialized, + * which may result in suboptimal default settings (this is + * important mainly for encoders, e.g. libx264). + * + * @return An AVCodecContext filled with default values or NULL on failure. 
+ */ +AVCodecContext *avcodec_alloc_context3(const AVCodec *codec); + +/** + * Free the codec context and everything associated with it and write NULL to + * the provided pointer. + */ +void avcodec_free_context(AVCodecContext **avctx); + +#if FF_API_GET_CONTEXT_DEFAULTS +/** + * @deprecated This function should not be used, as closing and opening a codec + * context multiple time is not supported. A new codec context should be + * allocated for each new use. + */ +int avcodec_get_context_defaults3(AVCodecContext *s, const AVCodec *codec); +#endif + +/** + * Get the AVClass for AVCodecContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_class(void); + +#if FF_API_COPY_CONTEXT +/** + * Get the AVClass for AVFrame. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_frame_class(void); + +/** + * Get the AVClass for AVSubtitleRect. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avcodec_get_subtitle_rect_class(void); + +/** + * Copy the settings of the source AVCodecContext into the destination + * AVCodecContext. The resulting destination codec context will be + * unopened, i.e. you are required to call avcodec_open2() before you + * can use this AVCodecContext to decode/encode video/audio data. + * + * @param dest target codec context, should be initialized with + * avcodec_alloc_context3(NULL), but otherwise uninitialized + * @param src source codec context + * @return AVERROR() on error (e.g. memory allocation error), 0 on success + * + * @deprecated The semantics of this function are ill-defined and it should not + * be used. 
If you need to transfer the stream parameters from one codec context + * to another, use an intermediate AVCodecParameters instance and the + * avcodec_parameters_from_context() / avcodec_parameters_to_context() + * functions. + */ +attribute_deprecated +int avcodec_copy_context(AVCodecContext *dest, const AVCodecContext *src); +#endif + +/** + * Allocate a new AVCodecParameters and set its fields to default values + * (unknown/invalid/0). The returned struct must be freed with + * avcodec_parameters_free(). + */ +AVCodecParameters *avcodec_parameters_alloc(void); + +/** + * Free an AVCodecParameters instance and everything associated with it and + * write NULL to the supplied pointer. + */ +void avcodec_parameters_free(AVCodecParameters **par); + +/** + * Copy the contents of src to dst. Any allocated fields in dst are freed and + * replaced with newly allocated duplicates of the corresponding fields in src. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_copy(AVCodecParameters *dst, const AVCodecParameters *src); + +/** + * Fill the parameters struct based on the values from the supplied codec + * context. Any allocated fields in par are freed and replaced with duplicates + * of the corresponding fields in codec. + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int avcodec_parameters_from_context(AVCodecParameters *par, + const AVCodecContext *codec); + +/** + * Fill the codec context based on the values from the supplied codec + * parameters. Any allocated fields in codec that have a corresponding field in + * par are freed and replaced with duplicates of the corresponding field in par. + * Fields in codec that do not have a counterpart in par are not touched. + * + * @return >= 0 on success, a negative AVERROR code on failure. + */ +int avcodec_parameters_to_context(AVCodecContext *codec, + const AVCodecParameters *par); + +/** + * Initialize the AVCodecContext to use the given AVCodec. 
Prior to using this + * function the context has to be allocated with avcodec_alloc_context3(). + * + * The functions avcodec_find_decoder_by_name(), avcodec_find_encoder_by_name(), + * avcodec_find_decoder() and avcodec_find_encoder() provide an easy way for + * retrieving a codec. + * + * @warning This function is not thread safe! + * + * @note Always call this function before using decoding routines (such as + * @ref avcodec_receive_frame()). + * + * @code + * avcodec_register_all(); + * av_dict_set(&opts, "b", "2.5M", 0); + * codec = avcodec_find_decoder(AV_CODEC_ID_H264); + * if (!codec) + * exit(1); + * + * context = avcodec_alloc_context3(codec); + * + * if (avcodec_open2(context, codec, opts) < 0) + * exit(1); + * @endcode + * + * @param avctx The context to initialize. + * @param codec The codec to open this context for. If a non-NULL codec has been + * previously passed to avcodec_alloc_context3() or + * for this context, then this parameter MUST be either NULL or + * equal to the previously passed codec. + * @param options A dictionary filled with AVCodecContext and codec-private options. + * On return this object will be filled with options that were not found. + * + * @return zero on success, a negative value on error + * @see avcodec_alloc_context3(), avcodec_find_decoder(), avcodec_find_encoder(), + * av_dict_set(), av_opt_find(). + */ +int avcodec_open2(AVCodecContext *avctx, const AVCodec *codec, AVDictionary **options); + +/** + * Close a given AVCodecContext and free all the data associated with it + * (but not the AVCodecContext itself). + * + * Calling this function on an AVCodecContext that hasn't been opened will free + * the codec-specific data allocated in avcodec_alloc_context3() with a non-NULL + * codec. Subsequent calls will do nothing. + * + * @note Do not use this function. Use avcodec_free_context() to destroy a + * codec context (either open or closed). 
Opening and closing a codec context + * multiple times is not supported anymore -- use multiple codec contexts + * instead. + */ +int avcodec_close(AVCodecContext *avctx); + +/** + * Free all allocated data in the given subtitle struct. + * + * @param sub AVSubtitle to free. + */ +void avsubtitle_free(AVSubtitle *sub); + +/** + * @} + */ + +/** + * @addtogroup lavc_packet + * @{ + */ + +/** + * Allocate an AVPacket and set its fields to default values. The resulting + * struct must be freed using av_packet_free(). + * + * @return An AVPacket filled with default values or NULL on failure. + * + * @note this only allocates the AVPacket itself, not the data buffers. Those + * must be allocated through other means such as av_new_packet. + * + * @see av_new_packet + */ +AVPacket *av_packet_alloc(void); + +/** + * Create a new packet that references the same data as src. + * + * This is a shortcut for av_packet_alloc()+av_packet_ref(). + * + * @return newly created AVPacket on success, NULL on error. + * + * @see av_packet_alloc + * @see av_packet_ref + */ +AVPacket *av_packet_clone(AVPacket *src); + +/** + * Free the packet, if the packet is reference counted, it will be + * unreferenced first. + * + * @param packet packet to be freed. The pointer will be set to NULL. + * @note passing NULL is a no-op. + */ +void av_packet_free(AVPacket **pkt); + +/** + * Initialize optional fields of a packet with default values. + * + * Note, this does not touch the data and size members, which have to be + * initialized separately. + * + * @param pkt packet + */ +void av_init_packet(AVPacket *pkt); + +/** + * Allocate the payload of a packet and initialize its fields with + * default values. 
+ * + * @param pkt packet + * @param size wanted payload size + * @return 0 if OK, AVERROR_xxx otherwise + */ +int av_new_packet(AVPacket *pkt, int size); + +/** + * Reduce packet size, correctly zeroing padding + * + * @param pkt packet + * @param size new size + */ +void av_shrink_packet(AVPacket *pkt, int size); + +/** + * Increase packet size, correctly zeroing padding + * + * @param pkt packet + * @param grow_by number of bytes by which to increase the size of the packet + */ +int av_grow_packet(AVPacket *pkt, int grow_by); + +/** + * Initialize a reference-counted packet from av_malloc()ed data. + * + * @param pkt packet to be initialized. This function will set the data, size, + * buf and destruct fields, all others are left untouched. + * @param data Data allocated by av_malloc() to be used as packet data. If this + * function returns successfully, the data is owned by the underlying AVBuffer. + * The caller may not access the data through other means. + * @param size size of data in bytes, without the padding. I.e. the full buffer + * size is assumed to be size + AV_INPUT_BUFFER_PADDING_SIZE. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_packet_from_data(AVPacket *pkt, uint8_t *data, int size); + +#if FF_API_AVPACKET_OLD_API +/** + * @warning This is a hack - the packet memory allocation stuff is broken. The + * packet is allocated if it was not really allocated. + * + * @deprecated Use av_packet_ref + */ +attribute_deprecated +int av_dup_packet(AVPacket *pkt); +/** + * Copy packet, including contents + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet(AVPacket *dst, const AVPacket *src); + +/** + * Copy packet side data + * + * @return 0 on success, negative AVERROR on fail + */ +int av_copy_packet_side_data(AVPacket *dst, const AVPacket *src); + +/** + * Free a packet. 
+ * + * @deprecated Use av_packet_unref + * + * @param pkt packet to free + */ +attribute_deprecated +void av_free_packet(AVPacket *pkt); +#endif +/** + * Allocate new information of a packet. + * + * @param pkt packet + * @param type side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t* av_packet_new_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Wrap an existing array as a packet side data. + * + * @param pkt packet + * @param type side information type + * @param data the side data array. It must be allocated with the av_malloc() + * family of functions. The ownership of the data is transferred to + * pkt. + * @param size side information size + * @return a non-negative number on success, a negative AVERROR code on + * failure. On failure, the packet is unchanged and the data remains + * owned by the caller. + */ +int av_packet_add_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + uint8_t *data, size_t size); + +/** + * Shrink the already allocated side data buffer + * + * @param pkt packet + * @param type side information type + * @param size new side information size + * @return 0 on success, < 0 on failure + */ +int av_packet_shrink_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int size); + +/** + * Get side information from packet. + * + * @param pkt packet + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t* av_packet_get_side_data(AVPacket *pkt, enum AVPacketSideDataType type, + int *size); + +int av_packet_merge_side_data(AVPacket *pkt); + +int av_packet_split_side_data(AVPacket *pkt); + +const char *av_packet_side_data_name(enum AVPacketSideDataType type); + +/** + * Pack a dictionary for use in side_data. + * + * @param dict The dictionary to pack. 
+ * @param size pointer to store the size of the returned data + * @return pointer to data if successful, NULL otherwise + */ +uint8_t *av_packet_pack_dictionary(AVDictionary *dict, int *size); +/** + * Unpack a dictionary from side_data. + * + * @param data data from side_data + * @param size size of the data + * @param dict the metadata storage dictionary + * @return 0 on success, < 0 on failure + */ +int av_packet_unpack_dictionary(const uint8_t *data, int size, AVDictionary **dict); + + +/** + * Convenience function to free all the side data stored. + * All the other fields stay untouched. + * + * @param pkt packet + */ +void av_packet_free_side_data(AVPacket *pkt); + +/** + * Setup a new reference to the data described by a given packet + * + * If src is reference-counted, setup dst as a new reference to the + * buffer in src. Otherwise allocate a new buffer in dst and copy the + * data from src into it. + * + * All the other fields are copied from src. + * + * @see av_packet_unref + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_packet_ref(AVPacket *dst, const AVPacket *src); + +/** + * Wipe the packet. + * + * Unreference the buffer referenced by the packet and reset the + * remaining packet fields to their default values. + * + * @param pkt The packet to be unreferenced. + */ +void av_packet_unref(AVPacket *pkt); + +/** + * Move every field in src to dst and reset src. + * + * @see av_packet_unref + * + * @param src Source packet, will be reset + * @param dst Destination packet + */ +void av_packet_move_ref(AVPacket *dst, AVPacket *src); + +/** + * Copy only "properties" fields from src to dst. + * + * Properties for the purpose of this function are all the fields + * beside those related to the packet data (buf, data, size) + * + * @param dst Destination packet + * @param src Source packet + * + * @return 0 on success AVERROR on failure. 
+ */ +int av_packet_copy_props(AVPacket *dst, const AVPacket *src); + +/** + * Convert valid timing fields (timestamps / durations) in a packet from one + * timebase to another. Timestamps with unknown values (AV_NOPTS_VALUE) will be + * ignored. + * + * @param pkt packet on which the conversion will be performed + * @param tb_src source timebase, in which the timing fields in pkt are + * expressed + * @param tb_dst destination timebase, to which the timing fields will be + * converted + */ +void av_packet_rescale_ts(AVPacket *pkt, AVRational tb_src, AVRational tb_dst); + +/** + * @} + */ + +/** + * @addtogroup lavc_decoding + * @{ + */ + +/** + * Find a registered decoder with a matching codec ID. + * + * @param id AVCodecID of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder(enum AVCodecID id); + +/** + * Find a registered decoder with the specified name. + * + * @param name name of the requested decoder + * @return A decoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_decoder_by_name(const char *name); + +/** + * The default callback for AVCodecContext.get_buffer2(). It is made public so + * it can be called by custom get_buffer2() implementations for decoders without + * AV_CODEC_CAP_DR1 set. + */ +int avcodec_default_get_buffer2(AVCodecContext *s, AVFrame *frame, int flags); + +#if FF_API_EMU_EDGE +/** + * Return the amount of padding in pixels which the get_buffer callback must + * provide around the edge of the image for codecs which do not have the + * CODEC_FLAG_EMU_EDGE flag. + * + * @return Required padding in pixels. + * + * @deprecated CODEC_FLAG_EMU_EDGE is deprecated, so this function is no longer + * needed + */ +attribute_deprecated +unsigned avcodec_get_edge_width(void); +#endif + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you do not use any horizontal + * padding. 
+ * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. + */ +void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height); + +/** + * Modify width and height values so that they will result in a memory + * buffer that is acceptable for the codec if you also ensure that all + * line sizes are a multiple of the respective linesize_align[i]. + * + * May only be used if a codec with AV_CODEC_CAP_DR1 has been opened. + */ +void avcodec_align_dimensions2(AVCodecContext *s, int *width, int *height, + int linesize_align[AV_NUM_DATA_POINTERS]); + +/** + * Converts AVChromaLocation to swscale x/y chroma position. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +int avcodec_enum_to_chroma_pos(int *xpos, int *ypos, enum AVChromaLocation pos); + +/** + * Converts swscale x/y chroma position to AVChromaLocation. + * + * The positions represent the chroma (0,0) position in a coordinates system + * with luma (0,0) representing the origin and luma(1,1) representing 256,256 + * + * @param xpos horizontal chroma sample position + * @param ypos vertical chroma sample position + */ +enum AVChromaLocation avcodec_chroma_pos_to_enum(int xpos, int ypos); + +/** + * Decode the audio frame of size avpkt->size from avpkt->data into frame. + * + * Some decoders may support multiple frames in a single AVPacket. Such + * decoders would then just decode the first frame and the return value would be + * less than the packet size. In this case, avcodec_decode_audio4 has to be + * called again with an AVPacket containing the remaining data in order to + * decode the second frame, etc... Even if no frames are returned, the packet + * needs to be fed to the decoder with remaining data until it is completely + * consumed or an error occurs. 
+ * + * Some decoders (those marked with AV_CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning samples. It is safe to flush even those decoders that are not + * marked with AV_CODEC_CAP_DELAY, then no samples will be returned. + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] frame The AVFrame in which to store decoded audio samples. + * The decoder will allocate a buffer for the decoded frame by + * calling the AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * @param[out] got_frame_ptr Zero if no frame could be decoded, otherwise it is + * non-zero. Note that this field being set to zero + * does not mean that an error has occurred. For + * decoders with AV_CODEC_CAP_DELAY set, no given decode + * call is guaranteed to produce a frame. 
+ * @param[in] avpkt The input AVPacket containing the input buffer. + * At least avpkt->data and avpkt->size should be set. Some + * decoders might also require additional fields to be set. + * @return A negative error code is returned if an error occurred during + * decoding, otherwise the number of bytes consumed from the input + * AVPacket is returned. + * +* @deprecated Use avcodec_send_packet() and avcodec_receive_frame(). + */ +attribute_deprecated +int avcodec_decode_audio4(AVCodecContext *avctx, AVFrame *frame, + int *got_frame_ptr, const AVPacket *avpkt); + +/** + * Decode the video frame of size avpkt->size from avpkt->data into picture. + * Some decoders may support multiple frames in a single AVPacket, such + * decoders would then just decode the first frame. + * + * @warning The input buffer must be AV_INPUT_BUFFER_PADDING_SIZE larger than + * the actual read bytes because some optimized bitstream readers read 32 or 64 + * bits at once and could read over the end. + * + * @warning The end of the input buffer buf should be set to 0 to ensure that + * no overreading happens for damaged MPEG streams. + * + * @note Codecs which have the AV_CODEC_CAP_DELAY capability set have a delay + * between input and output, these need to be fed with avpkt->data=NULL, + * avpkt->size=0 at the end to return the remaining frames. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] picture The AVFrame in which the decoded video frame will be stored. + * Use av_frame_alloc() to get an AVFrame. The codec will + * allocate memory for the actual bitmap by calling the + * AVCodecContext.get_buffer2() callback. + * When AVCodecContext.refcounted_frames is set to 1, the frame is + * reference counted and the returned reference belongs to the + * caller. The caller must release the frame using av_frame_unref() + * when the frame is no longer needed. 
The caller may safely write + * to the frame if av_frame_is_writable() returns 1. + * When AVCodecContext.refcounted_frames is set to 0, the returned + * reference belongs to the decoder and is valid only until the + * next call to this function or until closing or flushing the + * decoder. The caller may not write to it. + * + * @param[in] avpkt The input AVPacket containing the input buffer. + * You can create such packet with av_init_packet() and by then setting + * data and size, some decoders might in addition need other fields like + * flags&AV_PKT_FLAG_KEY. All decoders are designed to use the least + * fields possible. + * @param[in,out] got_picture_ptr Zero if no frame could be decompressed, otherwise, it is nonzero. + * @return On error a negative value is returned, otherwise the number of bytes + * used or zero if no frame could be decompressed. + * + * @deprecated Use avcodec_send_packet() and avcodec_receive_frame(). + */ +attribute_deprecated +int avcodec_decode_video2(AVCodecContext *avctx, AVFrame *picture, + int *got_picture_ptr, + const AVPacket *avpkt); + +/** + * Decode a subtitle message. + * Return a negative value on error, otherwise return the number of bytes used. + * If no subtitle could be decompressed, got_sub_ptr is zero. + * Otherwise, the subtitle is stored in *sub. + * Note that AV_CODEC_CAP_DR1 is not available for subtitle codecs. This is for + * simplicity, because the performance difference is expect to be negligible + * and reusing a get_buffer written for video codecs would probably perform badly + * due to a potentially very different allocation pattern. + * + * Some decoders (those marked with CODEC_CAP_DELAY) have a delay between input + * and output. This means that for some packets they will not immediately + * produce decoded output and need to be flushed at the end of decoding to get + * all the decoded data. 
Flushing is done by calling this function with packets + * with avpkt->data set to NULL and avpkt->size set to 0 until it stops + * returning subtitles. It is safe to flush even those decoders that are not + * marked with CODEC_CAP_DELAY, then no subtitles will be returned. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx the codec context + * @param[out] sub The Preallocated AVSubtitle in which the decoded subtitle will be stored, + * must be freed with avsubtitle_free if *got_sub_ptr is set. + * @param[in,out] got_sub_ptr Zero if no subtitle could be decompressed, otherwise, it is nonzero. + * @param[in] avpkt The input AVPacket containing the input buffer. + */ +int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub, + int *got_sub_ptr, + AVPacket *avpkt); + +/** + * Supply raw packet data as input to a decoder. + * + * Internally, this call will copy relevant AVCodecContext fields, which can + * influence decoding per-packet, and apply them when the packet is actually + * decoded. (For example AVCodecContext.skip_frame, which might direct the + * decoder to drop the frame contained by the packet sent with this function.) + * + * @warning The input buffer, avpkt->data must be AV_INPUT_BUFFER_PADDING_SIZE + * larger than the actual read bytes because some optimized bitstream + * readers read 32 or 64 bits at once and could read over the end. + * + * @warning Do not mix this API with the legacy API (like avcodec_decode_video2()) + * on the same AVCodecContext. It will return unexpected results now + * or in future libavcodec versions. + * + * @note The AVCodecContext MUST have been opened with @ref avcodec_open2() + * before packets may be fed to the decoder. + * + * @param avctx codec context + * @param[in] avpkt The input AVPacket. Usually, this will be a single video + * frame, or several complete audio frames. 
+ * Ownership of the packet remains with the caller, and the + * decoder will not write to the packet. The decoder may create + * a reference to the packet data (or copy it if the packet is + * not reference-counted). + * Unlike with older APIs, the packet is always fully consumed, + * and if it contains multiple frames (e.g. some audio codecs), + * will require you to call avcodec_receive_frame() multiple + * times afterwards before you can send a new packet. + * It can be NULL (or an AVPacket with data set to NULL and + * size set to 0); in this case, it is considered a flush + * packet, which signals the end of the stream. Sending the + * first flush packet will return success. Subsequent ones are + * unnecessary and will return AVERROR_EOF. If the decoder + * still has frames buffered, it will return them after sending + * a flush packet. + * + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted right now - the packet must be + * resent after trying to read output + * AVERROR_EOF: the decoder has been flushed, and no new packets can + * be sent to it (also returned if more than 1 flush + * packet is sent) + * AVERROR(EINVAL): codec not opened, it is an encoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int avcodec_send_packet(AVCodecContext *avctx, const AVPacket *avpkt); + +/** + * Return decoded output data from a decoder. + * + * @param avctx codec context + * @param frame This will be set to a reference-counted video or audio + * frame (depending on the decoder type) allocated by the + * decoder. Note that the function will always call + * av_frame_unref(frame) before doing anything else. 
+ * + * @return + * 0: success, a frame was returned + * AVERROR(EAGAIN): output is not available right now - user must try + * to send new input + * AVERROR_EOF: the decoder has been fully flushed, and there will be + * no more output frames + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other negative values: legitimate decoding errors + */ +int avcodec_receive_frame(AVCodecContext *avctx, AVFrame *frame); + +/** + * Supply a raw video or audio frame to the encoder. Use avcodec_receive_packet() + * to retrieve buffered output packets. + * + * @param avctx codec context + * @param[in] frame AVFrame containing the raw audio or video frame to be encoded. + * Ownership of the frame remains with the caller, and the + * encoder will not write to the frame. The encoder may create + * a reference to the frame data (or copy it if the frame is + * not reference-counted). + * It can be NULL, in which case it is considered a flush + * packet. This signals the end of the stream. If the encoder + * still has packets buffered, it will return them after this + * call. Once flushing mode has been entered, additional flush + * packets are ignored, and sending frames will return + * AVERROR_EOF. + * + * For audio: + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. 
+ * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): input is not accepted right now - the frame must be + * resent after trying to read output packets + * AVERROR_EOF: the encoder has been flushed, and no new frames can + * be sent to it + * AVERROR(EINVAL): codec not opened, refcounted_frames not set, it is a + * decoder, or requires flush + * AVERROR(ENOMEM): failed to add packet to internal queue, or similar + * other errors: legitimate decoding errors + */ +int avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame); + +/** + * Read encoded data from the encoder. + * + * @param avctx codec context + * @param avpkt This will be set to a reference-counted packet allocated by the + * encoder. Note that the function will always call + * av_frame_unref(frame) before doing anything else. + * @return 0 on success, otherwise negative error code: + * AVERROR(EAGAIN): output is not available right now - user must try + * to send input + * AVERROR_EOF: the encoder has been fully flushed, and there will be + * no more output packets + * AVERROR(EINVAL): codec not opened, or it is an encoder + * other errors: legitimate decoding errors + */ +int avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt); + + +/** + * @defgroup lavc_parsing Frame parsing + * @{ + */ + +enum AVPictureStructure { + AV_PICTURE_STRUCTURE_UNKNOWN, //< unknown + AV_PICTURE_STRUCTURE_TOP_FIELD, //< coded as top field + AV_PICTURE_STRUCTURE_BOTTOM_FIELD, //< coded as bottom field + AV_PICTURE_STRUCTURE_FRAME, //< coded as frame +}; + +typedef struct AVCodecParserContext { + void *priv_data; + struct AVCodecParser *parser; + int64_t frame_offset; /* offset of the current frame */ + int64_t cur_offset; /* current offset + (incremented by each av_parser_parse()) */ + int64_t next_frame_offset; /* offset of the next frame */ + /* video info */ + int pict_type; /* XXX: Put it back in AVCodecContext. 
*/ + /** + * This field is used for proper frame duration computation in lavf. + * It signals, how much longer the frame duration of the current frame + * is compared to normal frame duration. + * + * frame_duration = (1 + repeat_pict) * time_base + * + * It is used by codecs like H.264 to display telecined material. + */ + int repeat_pict; /* XXX: Put it back in AVCodecContext. */ + int64_t pts; /* pts of the current frame */ + int64_t dts; /* dts of the current frame */ + + /* private data */ + int64_t last_pts; + int64_t last_dts; + int fetch_timestamp; + +#define AV_PARSER_PTS_NB 4 + int cur_frame_start_index; + int64_t cur_frame_offset[AV_PARSER_PTS_NB]; + int64_t cur_frame_pts[AV_PARSER_PTS_NB]; + int64_t cur_frame_dts[AV_PARSER_PTS_NB]; + + int flags; +#define PARSER_FLAG_COMPLETE_FRAMES 0x0001 +#define PARSER_FLAG_ONCE 0x0002 +/// Set if the parser has a valid file offset +#define PARSER_FLAG_FETCHED_OFFSET 0x0004 +#define PARSER_FLAG_USE_CODEC_TS 0x1000 + + int64_t offset; ///< byte offset from starting packet start + int64_t cur_frame_end[AV_PARSER_PTS_NB]; + + /** + * Set by parser to 1 for key frames and 0 for non-key frames. + * It is initialized to -1, so if the parser doesn't set this flag, + * old-style fallback using AV_PICTURE_TYPE_I picture type as key frames + * will be used. + */ + int key_frame; + +#if FF_API_CONVERGENCE_DURATION + /** + * @deprecated unused + */ + attribute_deprecated + int64_t convergence_duration; +#endif + + // Timestamp generation support: + /** + * Synchronization point for start of timestamp generation. + * + * Set to >0 for sync point, 0 for no sync point and <0 for undefined + * (default). + * + * For example, this corresponds to presence of H.264 buffering period + * SEI message. + */ + int dts_sync_point; + + /** + * Offset of the current timestamp against last timestamp sync point in + * units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. 
Otherwise, it must + * contain a valid timestamp offset. + * + * Note that the timestamp of sync point has usually a nonzero + * dts_ref_dts_delta, which refers to the previous sync point. Offset of + * the next frame after timestamp sync point will be usually 1. + * + * For example, this corresponds to H.264 cpb_removal_delay. + */ + int dts_ref_dts_delta; + + /** + * Presentation delay of current frame in units of AVCodecContext.time_base. + * + * Set to INT_MIN when dts_sync_point unused. Otherwise, it must + * contain valid non-negative timestamp delta (presentation time of a frame + * must not lie in the past). + * + * This delay represents the difference between decoding and presentation + * time of the frame. + * + * For example, this corresponds to H.264 dpb_output_delay. + */ + int pts_dts_delta; + + /** + * Position of the packet in file. + * + * Analogous to cur_frame_pts/dts + */ + int64_t cur_frame_pos[AV_PARSER_PTS_NB]; + + /** + * Byte position of currently parsed frame in stream. + */ + int64_t pos; + + /** + * Previous frame byte position. + */ + int64_t last_pos; + + /** + * Duration of the current frame. + * For audio, this is in units of 1 / AVCodecContext.sample_rate. + * For all other types, this is in units of AVCodecContext.time_base. + */ + int duration; + + enum AVFieldOrder field_order; + + /** + * Indicate whether a picture is coded as a frame, top field or bottom field. + * + * For example, H.264 field_pic_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_FRAME. An H.264 picture with field_pic_flag + * equal to 1 and bottom_field_flag equal to 0 corresponds to + * AV_PICTURE_STRUCTURE_TOP_FIELD. + */ + enum AVPictureStructure picture_structure; + + /** + * Picture number incremented in presentation or output order. + * This field may be reinitialized at the first picture of a new sequence. + * + * For example, this corresponds to H.264 PicOrderCnt. 
+ */ + int output_picture_number; + + /** + * Dimensions of the decoded video intended for presentation. + */ + int width; + int height; + + /** + * Dimensions of the coded video. + */ + int coded_width; + int coded_height; + + /** + * The format of the coded data, corresponds to enum AVPixelFormat for video + * and for enum AVSampleFormat for audio. + * + * Note that a decoder can have considerable freedom in how exactly it + * decodes the data, so the format reported here might be different from the + * one returned by a decoder. + */ + int format; +} AVCodecParserContext; + +typedef struct AVCodecParser { + int codec_ids[5]; /* several codec IDs are permitted */ + int priv_data_size; + int (*parser_init)(AVCodecParserContext *s); + /* This callback never returns an error, a negative value means that + * the frame start was in a previous packet. */ + int (*parser_parse)(AVCodecParserContext *s, + AVCodecContext *avctx, + const uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size); + void (*parser_close)(AVCodecParserContext *s); + int (*split)(AVCodecContext *avctx, const uint8_t *buf, int buf_size); + struct AVCodecParser *next; +} AVCodecParser; + +AVCodecParser *av_parser_next(const AVCodecParser *c); + +void av_register_codec_parser(AVCodecParser *parser); +AVCodecParserContext *av_parser_init(int codec_id); + +/** + * Parse a packet. + * + * @param s parser context. + * @param avctx codec context. + * @param poutbuf set to pointer to parsed buffer or NULL if not yet finished. + * @param poutbuf_size set to size of parsed buffer or zero if not yet finished. + * @param buf input buffer. + * @param buf_size input length, to signal EOF, this should be 0 (so that the last frame can be output). + * @param pts input presentation timestamp. + * @param dts input decoding timestamp. + * @param pos input byte position in stream. + * @return the number of bytes of the input bitstream used. 
+ * + * Example: + * @code + * while(in_len){ + * len = av_parser_parse2(myparser, AVCodecContext, &data, &size, + * in_data, in_len, + * pts, dts, pos); + * in_data += len; + * in_len -= len; + * + * if(size) + * decode_frame(data, size); + * } + * @endcode + */ +int av_parser_parse2(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, + int64_t pts, int64_t dts, + int64_t pos); + +/** + * @return 0 if the output buffer is a subset of the input, 1 if it is allocated and must be freed + * @deprecated use AVBitStreamFilter + */ +int av_parser_change(AVCodecParserContext *s, + AVCodecContext *avctx, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); +void av_parser_close(AVCodecParserContext *s); + +/** + * @} + * @} + */ + +/** + * @addtogroup lavc_encoding + * @{ + */ + +/** + * Find a registered encoder with a matching codec ID. + * + * @param id AVCodecID of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder(enum AVCodecID id); + +/** + * Find a registered encoder with the specified name. + * + * @param name name of the requested encoder + * @return An encoder if one was found, NULL otherwise. + */ +AVCodec *avcodec_find_encoder_by_name(const char *name); + +/** + * Encode a frame of audio. + * + * Takes input samples from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay, split, and combine input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. 
If avpkt->data and + * avpkt->size are set, avpkt->destruct must also be set. All + * other AVPacket fields will be reset by the encoder using + * av_init_packet(). If avpkt->data is NULL, the encoder will + * allocate it. The encoder will set avpkt->size to the size + * of the output packet. + * + * If this function fails or produces no output, avpkt will be + * freed using av_packet_unref(). + * @param[in] frame AVFrame containing the raw audio data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * If AV_CODEC_CAP_VARIABLE_FRAME_SIZE is set, then each frame + * can have any number of samples. + * If it is not set, frame->nb_samples must be equal to + * avctx->frame_size for all frames except the last. + * The final frame may be smaller than avctx->frame_size. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use avcodec_send_frame()/avcodec_receive_packet() instead + */ +attribute_deprecated +int avcodec_encode_audio2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +/** + * Encode a frame of video. + * + * Takes input raw video data from frame and writes the next output packet, if + * available, to avpkt. The output packet does not necessarily contain data for + * the most recent frame, as encoders can delay and reorder input frames + * internally as needed. + * + * @param avctx codec context + * @param avpkt output AVPacket. + * The user can supply an output buffer by setting + * avpkt->data and avpkt->size prior to calling the + * function, but if the size of the user-provided data is not + * large enough, encoding will fail. 
All other AVPacket fields + * will be reset by the encoder using av_init_packet(). If + * avpkt->data is NULL, the encoder will allocate it. + * The encoder will set avpkt->size to the size of the + * output packet. The returned data (if any) belongs to the + * caller, he is responsible for freeing it. + * + * If this function fails or produces no output, avpkt will be + * freed using av_packet_unref(). + * @param[in] frame AVFrame containing the raw video data to be encoded. + * May be NULL when flushing an encoder that has the + * AV_CODEC_CAP_DELAY capability set. + * @param[out] got_packet_ptr This field is set to 1 by libavcodec if the + * output packet is non-empty, and to 0 if it is + * empty. If the function returns an error, the + * packet can be assumed to be invalid, and the + * value of got_packet_ptr is undefined and should + * not be used. + * @return 0 on success, negative error code on failure + * + * @deprecated use avcodec_send_frame()/avcodec_receive_packet() instead + */ +attribute_deprecated +int avcodec_encode_video2(AVCodecContext *avctx, AVPacket *avpkt, + const AVFrame *frame, int *got_packet_ptr); + +int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size, + const AVSubtitle *sub); + + +/** + * @} + */ + +#if FF_API_AVCODEC_RESAMPLE +/** + * @defgroup lavc_resample Audio resampling + * @ingroup libavc + * @deprecated use libswresample instead + * + * @{ + */ +struct ReSampleContext; +struct AVResampleContext; + +typedef struct ReSampleContext ReSampleContext; + +/** + * Initialize audio resampling context. 
+ * + * @param output_channels number of output channels + * @param input_channels number of input channels + * @param output_rate output sample rate + * @param input_rate input sample rate + * @param sample_fmt_out requested output sample format + * @param sample_fmt_in input sample format + * @param filter_length length of each FIR filter in the filterbank relative to the cutoff frequency + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear if 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + * @return allocated ReSampleContext, NULL if error occurred + */ +attribute_deprecated +ReSampleContext *av_audio_resample_init(int output_channels, int input_channels, + int output_rate, int input_rate, + enum AVSampleFormat sample_fmt_out, + enum AVSampleFormat sample_fmt_in, + int filter_length, int log2_phase_count, + int linear, double cutoff); + +attribute_deprecated +int audio_resample(ReSampleContext *s, short *output, short *input, int nb_samples); + +/** + * Free resample context. + * + * @param s a non-NULL pointer to a resample context previously + * created with av_audio_resample_init() + */ +attribute_deprecated +void audio_resample_close(ReSampleContext *s); + + +/** + * Initialize an audio resampler. + * Note, if either rate is not an integer then simply scale both rates up so they are. 
+ * @param filter_length length of each FIR filter in the filterbank relative to the cutoff freq + * @param log2_phase_count log2 of the number of entries in the polyphase filterbank + * @param linear If 1 then the used FIR filter will be linearly interpolated + between the 2 closest, if 0 the closest will be used + * @param cutoff cutoff frequency, 1.0 corresponds to half the output sampling rate + */ +attribute_deprecated +struct AVResampleContext *av_resample_init(int out_rate, int in_rate, int filter_length, int log2_phase_count, int linear, double cutoff); + +/** + * Resample an array of samples using a previously configured context. + * @param src an array of unconsumed samples + * @param consumed the number of samples of src which have been consumed are returned here + * @param src_size the number of unconsumed samples available + * @param dst_size the amount of space in samples available in dst + * @param update_ctx If this is 0 then the context will not be modified, that way several channels can be resampled with the same context. + * @return the number of samples written in dst or -1 if an error occurred + */ +attribute_deprecated +int av_resample(struct AVResampleContext *c, short *dst, short *src, int *consumed, int src_size, int dst_size, int update_ctx); + + +/** + * Compensate samplerate/timestamp drift. 
The compensation is done by changing + * the resampler parameters, so no audible clicks or similar distortions occur + * @param compensation_distance distance in output samples over which the compensation should be performed + * @param sample_delta number of output samples which should be output less + * + * example: av_resample_compensate(c, 10, 500) + * here instead of 510 samples only 500 samples would be output + * + * note, due to rounding the actual compensation might be slightly different, + * especially if the compensation_distance is large and the in_rate used during init is small + */ +attribute_deprecated +void av_resample_compensate(struct AVResampleContext *c, int sample_delta, int compensation_distance); +attribute_deprecated +void av_resample_close(struct AVResampleContext *c); + +/** + * @} + */ +#endif + +#if FF_API_AVPICTURE +/** + * @addtogroup lavc_picture + * @{ + */ + +/** + * @deprecated unused + */ +attribute_deprecated +int avpicture_alloc(AVPicture *picture, enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +void avpicture_free(AVPicture *picture); + +/** + * @deprecated use av_image_fill_arrays() instead. + */ +attribute_deprecated +int avpicture_fill(AVPicture *picture, const uint8_t *ptr, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated use av_image_copy_to_buffer() instead. + */ +attribute_deprecated +int avpicture_layout(const AVPicture *src, enum AVPixelFormat pix_fmt, + int width, int height, + unsigned char *dest, int dest_size); + +/** + * @deprecated use av_image_get_buffer_size() instead. + */ +attribute_deprecated +int avpicture_get_size(enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated av_image_copy() instead. 
+ */ +attribute_deprecated +void av_picture_copy(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * @deprecated unused + */ +attribute_deprecated +int av_picture_crop(AVPicture *dst, const AVPicture *src, + enum AVPixelFormat pix_fmt, int top_band, int left_band); + +/** + * @deprecated unused + */ +attribute_deprecated +int av_picture_pad(AVPicture *dst, const AVPicture *src, int height, int width, enum AVPixelFormat pix_fmt, + int padtop, int padbottom, int padleft, int padright, int *color); + +/** + * @} + */ +#endif + +/** + * @defgroup lavc_misc Utility functions + * @ingroup libavc + * + * Miscellaneous utility functions related to both encoding and decoding + * (or neither). + * @{ + */ + +/** + * @defgroup lavc_misc_pixfmt Pixel formats + * + * Functions for working with pixel formats. + * @{ + */ + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * This function asserts that pix_fmt is valid. See av_pix_fmt_get_chroma_sub_sample + * for one that returns a failure code and continues in case of invalid + * pix_fmts. + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w + * @param[out] v_shift store log2_chroma_h + * + * @see av_pix_fmt_get_chroma_sub_sample + */ + +void avcodec_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, int *h_shift, int *v_shift); + +/** + * Return a value representing the fourCC code associated to the + * pixel format pix_fmt, or 0 if no associated fourCC code can be + * found. + */ +unsigned int avcodec_pix_fmt_to_codec_tag(enum AVPixelFormat pix_fmt); + +/** + * @deprecated see av_get_pix_fmt_loss() + */ +int avcodec_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Find the best pixel format to convert to given a certain source pixel + * format. 
When converting from one pixel format to another, information loss + * may occur. For example, when converting from RGB24 to GRAY, the color + * information will be lost. Similarly, other losses occur when converting from + * some formats to other formats. avcodec_find_best_pix_fmt_of_2() searches which of + * the given pixel formats should be used to suffer the least amount of loss. + * The pixel formats from which it chooses one, are determined by the + * pix_fmt_list parameter. + * + * + * @param[in] pix_fmt_list AV_PIX_FMT_NONE terminated array of pixel formats to choose from + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @param[out] loss_ptr Combination of flags informing you what kind of losses will occur. + * @return The best pixel format to convert to or -1 if none was found. + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_list(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); + +/** + * @deprecated see av_find_best_pix_fmt_of_2() + */ +enum AVPixelFormat avcodec_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +attribute_deprecated +#if AV_HAVE_INCOMPATIBLE_LIBAV_ABI +enum AVPixelFormat avcodec_find_best_pix_fmt2(const enum AVPixelFormat *pix_fmt_list, + enum AVPixelFormat src_pix_fmt, + int has_alpha, int *loss_ptr); +#else +enum AVPixelFormat avcodec_find_best_pix_fmt2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); +#endif + + +enum AVPixelFormat avcodec_default_get_format(struct AVCodecContext *s, const enum AVPixelFormat * fmt); + +/** + * @} + */ + +#if FF_API_SET_DIMENSIONS +/** + * @deprecated this function is not supposed to be used from outside of lavc + */ +attribute_deprecated +void 
avcodec_set_dimensions(AVCodecContext *s, int width, int height); +#endif + +/** + * Put a string representing the codec tag codec_tag in buf. + * + * @param buf buffer to place codec tag in + * @param buf_size size in bytes of buf + * @param codec_tag codec tag to assign + * @return the length of the string that would have been generated if + * enough space had been available, excluding the trailing null + */ +size_t av_get_codec_tag_string(char *buf, size_t buf_size, unsigned int codec_tag); + +void avcodec_string(char *buf, int buf_size, AVCodecContext *enc, int encode); + +/** + * Return a name for the specified profile, if available. + * + * @param codec the codec that is searched for the given profile + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + */ +const char *av_get_profile_name(const AVCodec *codec, int profile); + +/** + * Return a name for the specified profile, if available. + * + * @param codec_id the ID of the codec to which the requested profile belongs + * @param profile the profile value for which a name is requested + * @return A name for the profile if found, NULL otherwise. + * + * @note unlike av_get_profile_name(), which searches a list of profiles + * supported by a specific decoder or encoder implementation, this + * function searches the list of profiles from the AVCodecDescriptor + */ +const char *avcodec_profile_name(enum AVCodecID codec_id, int profile); + +int avcodec_default_execute(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2),void *arg, int *ret, int count, int size); +int avcodec_default_execute2(AVCodecContext *c, int (*func)(AVCodecContext *c2, void *arg2, int, int),void *arg, int *ret, int count); +//FIXME func typedef + +/** + * Fill AVFrame audio data and linesize pointers. + * + * The buffer buf must be a preallocated buffer with a size big enough + * to contain the specified samples amount. 
The filled AVFrame data + * pointers will point to this buffer. + * + * AVFrame extended_data channel pointers are allocated if necessary for + * planar audio. + * + * @param frame the AVFrame + * frame->nb_samples must be set prior to calling the + * function. This function fills in frame->data, + * frame->extended_data, frame->linesize[0]. + * @param nb_channels channel count + * @param sample_fmt sample format + * @param buf buffer to use for frame data + * @param buf_size size of buffer + * @param align plane size sample alignment (0 = default) + * @return >=0 on success, negative error code on failure + * @todo return the size in bytes required to store the samples in + * case of success, at the next libavutil bump + */ +int avcodec_fill_audio_frame(AVFrame *frame, int nb_channels, + enum AVSampleFormat sample_fmt, const uint8_t *buf, + int buf_size, int align); + +/** + * Reset the internal decoder state / flush internal buffers. Should be called + * e.g. when seeking or when switching to a different stream. + * + * @note when refcounted frames are not used (i.e. avctx->refcounted_frames is 0), + * this invalidates the frames previously returned from the decoder. When + * refcounted frames are used, the decoder just releases any references it might + * keep internally, but the caller's reference remains valid. + */ +void avcodec_flush_buffers(AVCodecContext *avctx); + +/** + * Return codec bits per sample. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return the PCM codec associated with a sample format. + * @param be endianness, 0 for little, 1 for big, + * -1 (or anything else) for native + * @return AV_CODEC_ID_PCM_* or AV_CODEC_ID_NONE + */ +enum AVCodecID av_get_pcm_codec(enum AVSampleFormat fmt, int be); + +/** + * Return codec bits per sample. 
+ * Only return non-zero if the bits per sample is exactly correct, not an + * approximation. + * + * @param[in] codec_id the codec + * @return Number of bits per sample or zero if unknown for the given codec. + */ +int av_get_exact_bits_per_sample(enum AVCodecID codec_id); + +/** + * Return audio frame duration. + * + * @param avctx codec context + * @param frame_bytes size of the frame, or 0 if unknown + * @return frame duration, in samples, if known. 0 if not able to + * determine. + */ +int av_get_audio_frame_duration(AVCodecContext *avctx, int frame_bytes); + +/** + * This function is the same as av_get_audio_frame_duration(), except it works + * with AVCodecParameters instead of an AVCodecContext. + */ +int av_get_audio_frame_duration2(AVCodecParameters *par, int frame_bytes); + +#if FF_API_OLD_BSF +typedef struct AVBitStreamFilterContext { + void *priv_data; + struct AVBitStreamFilter *filter; + AVCodecParserContext *parser; + struct AVBitStreamFilterContext *next; + /** + * Internal default arguments, used if NULL is passed to av_bitstream_filter_filter(). + * Not for access by library users. + */ + char *args; +} AVBitStreamFilterContext; +#endif + +typedef struct AVBSFInternal AVBSFInternal; + +/** + * The bitstream filter state. + * + * This struct must be allocated with av_bsf_alloc() and freed with + * av_bsf_free(). + * + * The fields in the struct will only be changed (by the caller or by the + * filter) as described in their documentation, and are to be considered + * immutable otherwise. + */ +typedef struct AVBSFContext { + /** + * A class for logging and AVOptions + */ + const AVClass *av_class; + + /** + * The bitstream filter this context is an instance of. + */ + const struct AVBitStreamFilter *filter; + + /** + * Opaque libavcodec internal data. Must not be touched by the caller in any + * way. + */ + AVBSFInternal *internal; + + /** + * Opaque filter-specific private data. 
If filter->priv_class is non-NULL, + * this is an AVOptions-enabled struct. + */ + void *priv_data; + + /** + * Parameters of the input stream. Set by the caller before av_bsf_init(). + */ + AVCodecParameters *par_in; + + /** + * Parameters of the output stream. Set by the filter in av_bsf_init(). + */ + AVCodecParameters *par_out; + + /** + * The timebase used for the timestamps of the input packets. Set by the + * caller before av_bsf_init(). + */ + AVRational time_base_in; + + /** + * The timebase used for the timestamps of the output packets. Set by the + * filter in av_bsf_init(). + */ + AVRational time_base_out; +} AVBSFContext; + +typedef struct AVBitStreamFilter { + const char *name; + + /** + * A list of codec ids supported by the filter, terminated by + * AV_CODEC_ID_NONE. + * May be NULL, in that case the bitstream filter works with any codec id. + */ + const enum AVCodecID *codec_ids; + + /** + * A class for the private data, used to declare bitstream filter private + * AVOptions. This field is NULL for bitstream filters that do not declare + * any options. + * + * If this field is non-NULL, the first member of the filter private data + * must be a pointer to AVClass, which will be set by libavcodec generic + * code to this class. + */ + const AVClass *priv_class; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavcodec and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + int priv_data_size; + int (*init)(AVBSFContext *ctx); + int (*filter)(AVBSFContext *ctx, AVPacket *pkt); + void (*close)(AVBSFContext *ctx); +} AVBitStreamFilter; + +#if FF_API_OLD_BSF +/** + * Register a bitstream filter. 
+ * + * The filter will be accessible to the application code through + * av_bitstream_filter_next() or can be directly initialized with + * av_bitstream_filter_init(). + * + * @see avcodec_register_all() + */ +attribute_deprecated +void av_register_bitstream_filter(AVBitStreamFilter *bsf); + +/** + * Create and initialize a bitstream filter context given a bitstream + * filter name. + * + * The returned context must be freed with av_bitstream_filter_close(). + * + * @param name the name of the bitstream filter + * @return a bitstream filter context if a matching filter was found + * and successfully initialized, NULL otherwise + */ +attribute_deprecated +AVBitStreamFilterContext *av_bitstream_filter_init(const char *name); + +/** + * Filter bitstream. + * + * This function filters the buffer buf with size buf_size, and places the + * filtered buffer in the buffer pointed to by poutbuf. + * + * The output buffer must be freed by the caller. + * + * @param bsfc bitstream filter context created by av_bitstream_filter_init() + * @param avctx AVCodecContext accessed by the filter, may be NULL. + * If specified, this must point to the encoder context of the + * output stream the packet is sent to. + * @param args arguments which specify the filter configuration, may be NULL + * @param poutbuf pointer which is updated to point to the filtered buffer + * @param poutbuf_size pointer which is updated to the filtered buffer size in bytes + * @param buf buffer containing the data to filter + * @param buf_size size in bytes of buf + * @param keyframe set to non-zero if the buffer to filter corresponds to a key-frame packet data + * @return >= 0 in case of success, or a negative error code in case of failure + * + * If the return value is positive, an output buffer is allocated and + * is available in *poutbuf, and is distinct from the input buffer. 
+ * + * If the return value is 0, the output buffer is not allocated and + * should be considered identical to the input buffer, or in case + * *poutbuf was set it points to the input buffer (not necessarily to + * its starting address). A special case is if *poutbuf was set to NULL and + * *poutbuf_size was set to 0, which indicates the packet should be dropped. + */ +attribute_deprecated +int av_bitstream_filter_filter(AVBitStreamFilterContext *bsfc, + AVCodecContext *avctx, const char *args, + uint8_t **poutbuf, int *poutbuf_size, + const uint8_t *buf, int buf_size, int keyframe); + +/** + * Release bitstream filter context. + * + * @param bsf the bitstream filter context created with + * av_bitstream_filter_init(), can be NULL + */ +attribute_deprecated +void av_bitstream_filter_close(AVBitStreamFilterContext *bsf); + +/** + * If f is NULL, return the first registered bitstream filter, + * if f is non-NULL, return the next registered bitstream filter + * after f, or NULL if f is the last one. + * + * This function can be used to iterate over all registered bitstream + * filters. + */ +attribute_deprecated +AVBitStreamFilter *av_bitstream_filter_next(const AVBitStreamFilter *f); +#endif + +/** + * @return a bitstream filter with the specified name or NULL if no such + * bitstream filter exists. + */ +const AVBitStreamFilter *av_bsf_get_by_name(const char *name); + +/** + * Iterate over all registered bitstream filters. + * + * @param opaque a pointer where libavcodec will store the iteration state. Must + * point to NULL to start the iteration. + * + * @return the next registered bitstream filter or NULL when the iteration is + * finished + */ +const AVBitStreamFilter *av_bsf_next(void **opaque); + +/** + * Allocate a context for a given bitstream filter. The caller must fill in the + * context parameters as described in the documentation and then call + * av_bsf_init() before sending any data to the filter. 
+ * + * @param filter the filter for which to allocate an instance. + * @param ctx a pointer into which the pointer to the newly-allocated context + * will be written. It must be freed with av_bsf_free() after the + * filtering is done. + * + * @return 0 on success, a negative AVERROR code on failure + */ +int av_bsf_alloc(const AVBitStreamFilter *filter, AVBSFContext **ctx); + +/** + * Prepare the filter for use, after all the parameters and options have been + * set. + */ +int av_bsf_init(AVBSFContext *ctx); + +/** + * Submit a packet for filtering. + * + * After sending each packet, the filter must be completely drained by calling + * av_bsf_receive_packet() repeatedly until it returns AVERROR(EAGAIN) or + * AVERROR_EOF. + * + * @param pkt the packet to filter. The bitstream filter will take ownership of + * the packet and reset the contents of pkt. pkt is not touched if an error occurs. + * This parameter may be NULL, which signals the end of the stream (i.e. no more + * packets will be sent). That will cause the filter to output any packets it + * may have buffered internally. + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_bsf_send_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Retrieve a filtered packet. + * + * @param[out] pkt this struct will be filled with the contents of the filtered + * packet. It is owned by the caller and must be freed using + * av_packet_unref() when it is no longer needed. + * This parameter should be "clean" (i.e. freshly allocated + * with av_packet_alloc() or unreffed with av_packet_unref()) + * when this function is called. If this function returns + * successfully, the contents of pkt will be completely + * overwritten by the returned data. On failure, pkt is not + * touched. + * + * @return 0 on success. AVERROR(EAGAIN) if more packets need to be sent to the + * filter (using av_bsf_send_packet()) to get more output. AVERROR_EOF if there + * will be no further output from the filter. 
Another negative AVERROR value if + * an error occurs. + * + * @note one input packet may result in several output packets, so after sending + * a packet with av_bsf_send_packet(), this function needs to be called + * repeatedly until it stops returning 0. It is also possible for a filter to + * output fewer packets than were sent to it, so this function may return + * AVERROR(EAGAIN) immediately after a successful av_bsf_send_packet() call. + */ +int av_bsf_receive_packet(AVBSFContext *ctx, AVPacket *pkt); + +/** + * Free a bitstream filter context and everything associated with it; write NULL + * into the supplied pointer. + */ +void av_bsf_free(AVBSFContext **ctx); + +/** + * Get the AVClass for AVBSFContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *av_bsf_get_class(void); + +/* memory */ + +/** + * Same behaviour av_fast_malloc but the buffer has additional + * AV_INPUT_BUFFER_PADDING_SIZE at the end which will always be 0. + * + * In addition the whole buffer will initially and after resizes + * be 0-initialized so that no uninitialized data will ever appear. + */ +void av_fast_padded_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Same behaviour av_fast_padded_malloc except that buffer will always + * be 0-initialized after call. + */ +void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * Encode extradata length to a buffer. Used by xiph codecs. + * + * @param s buffer to write to; must be at least (v/255+1) bytes long + * @param v size of extradata in bytes + * @return number of bytes written to the buffer. + */ +unsigned int av_xiphlacing(unsigned char *s, unsigned int v); + +#if FF_API_MISSING_SAMPLE +/** + * Log a generic warning message about a missing feature. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. 
+ * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] feature string containing the name of the missing feature + * @param[in] want_sample indicates if samples are wanted which exhibit this feature. + * If want_sample is non-zero, additional verbiage will be added to the log + * message which tells the user how to report samples to the development + * mailing list. + * @deprecated Use avpriv_report_missing_feature() instead. + */ +attribute_deprecated +void av_log_missing_feature(void *avc, const char *feature, int want_sample); + +/** + * Log a generic warning message asking for a sample. This function is + * intended to be used internally by FFmpeg (libavcodec, libavformat, etc.) + * only, and would normally not be used by applications. + * @param[in] avc a pointer to an arbitrary struct of which the first field is + * a pointer to an AVClass struct + * @param[in] msg string containing an optional message, or NULL if no message + * @deprecated Use avpriv_request_sample() instead. + */ +attribute_deprecated +void av_log_ask_for_sample(void *avc, const char *msg, ...) av_printf_format(2, 3); +#endif /* FF_API_MISSING_SAMPLE */ + +/** + * Register the hardware accelerator hwaccel. + */ +void av_register_hwaccel(AVHWAccel *hwaccel); + +/** + * If hwaccel is NULL, returns the first registered hardware accelerator, + * if hwaccel is non-NULL, returns the next registered hardware accelerator + * after hwaccel, or NULL if hwaccel is the last one. + */ +AVHWAccel *av_hwaccel_next(const AVHWAccel *hwaccel); + + +/** + * Lock operation used by lockmgr + */ +enum AVLockOp { + AV_LOCK_CREATE, ///< Create a mutex + AV_LOCK_OBTAIN, ///< Lock the mutex + AV_LOCK_RELEASE, ///< Unlock the mutex + AV_LOCK_DESTROY, ///< Free mutex resources +}; + +/** + * Register a user provided lock manager supporting the operations + * specified by AVLockOp. 
The "mutex" argument to the function points + * to a (void *) where the lockmgr should store/get a pointer to a user + * allocated mutex. It is NULL upon AV_LOCK_CREATE and equal to the + * value left by the last call for all other ops. If the lock manager is + * unable to perform the op then it should leave the mutex in the same + * state as when it was called and return a non-zero value. However, + * when called with AV_LOCK_DESTROY the mutex will always be assumed to + * have been successfully destroyed. If av_lockmgr_register succeeds + * it will return a non-negative value, if it fails it will return a + * negative value and destroy all mutex and unregister all callbacks. + * av_lockmgr_register is not thread-safe, it must be called from a + * single thread before any calls which make use of locking are used. + * + * @param cb User defined callback. av_lockmgr_register invokes calls + * to this callback and the previously registered callback. + * The callback will be used to create more than one mutex + * each of which must be backed by its own underlying locking + * mechanism (i.e. do not use a single static object to + * implement your lock manager). If cb is set to NULL the + * lockmgr will be unregistered. + */ +int av_lockmgr_register(int (*cb)(void **mutex, enum AVLockOp op)); + +/** + * Get the type of the given codec. + */ +enum AVMediaType avcodec_get_type(enum AVCodecID codec_id); + +/** + * Get the name of a codec. + * @return a static string identifying the codec; never NULL + */ +const char *avcodec_get_name(enum AVCodecID id); + +/** + * @return a positive value if s is open (i.e. avcodec_open2() was called on it + * with no corresponding avcodec_close()), 0 otherwise. 
+ */ +int avcodec_is_open(AVCodecContext *s); + +/** + * @return a non-zero number if codec is an encoder, zero otherwise + */ +int av_codec_is_encoder(const AVCodec *codec); + +/** + * @return a non-zero number if codec is a decoder, zero otherwise + */ +int av_codec_is_decoder(const AVCodec *codec); + +/** + * @return descriptor for given codec ID or NULL if no descriptor exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get(enum AVCodecID id); + +/** + * Iterate over all codec descriptors known to libavcodec. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVCodecDescriptor *avcodec_descriptor_next(const AVCodecDescriptor *prev); + +/** + * @return codec descriptor with the given name or NULL if no such descriptor + * exists. + */ +const AVCodecDescriptor *avcodec_descriptor_get_by_name(const char *name); + +/** + * Allocate a CPB properties structure and initialize its fields to default + * values. + * + * @param size if non-NULL, the size of the allocated struct will be written + * here. This is useful for embedding it in side data. + * + * @return the newly allocated struct or NULL on failure + */ +AVCPBProperties *av_cpb_properties_alloc(size_t *size); + +/** + * @} + */ + +#endif /* AVCODEC_AVCODEC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/avdct.h b/third-party/FFmpeg-iOS/include/libavcodec/avdct.h new file mode 100644 index 0000000000..59408f8e71 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/avdct.h @@ -0,0 +1,84 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVDCT_H +#define AVCODEC_AVDCT_H + +#include "../libavutilopt.h" + +/** + * AVDCT context. + * @note function pointers can be NULL if the specific features have been + * disabled at build time. + */ +typedef struct AVDCT { + const AVClass *av_class; + + void (*idct)(int16_t *block /* align 16 */); + + /** + * IDCT input permutation. + * Several optimized IDCTs need a permutated input (relative to the + * normal order of the reference IDCT). + * This permutation must be performed before the idct_put/add. + * Note, normally this can be merged with the zigzag/alternate scan
+ * An example to avoid confusion: + * - (->decode coeffs -> zigzag reorder -> dequant -> reference IDCT -> ...) + * - (x -> reference DCT -> reference IDCT -> x) + * - (x -> reference DCT -> simple_mmx_perm = idct_permutation + * -> simple_idct_mmx -> x) + * - (-> decode coeffs -> zigzag reorder -> simple_mmx_perm -> dequant + * -> simple_idct_mmx -> ...) + */ + uint8_t idct_permutation[64]; + + void (*fdct)(int16_t *block /* align 16 */); + + + /** + * DCT algorithm. + * must use AVOptions to set this field. + */ + int dct_algo; + + /** + * IDCT algorithm. + * must use AVOptions to set this field. + */ + int idct_algo; + + void (*get_pixels)(int16_t *block /* align 16 */, + const uint8_t *pixels /* align 8 */, + ptrdiff_t line_size); + + int bits_per_sample; +} AVDCT; + +/** + * Allocates a AVDCT context. + * This needs to be initialized with avcodec_dct_init() after optionally + * configuring it with AVOptions. + * + * To free it use av_free() + */ +AVDCT *avcodec_dct_alloc(void); +int avcodec_dct_init(AVDCT *); + +const AVClass *avcodec_dct_get_class(void); + +#endif /* AVCODEC_AVDCT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/avfft.h b/third-party/FFmpeg-iOS/include/libavcodec/avfft.h new file mode 100644 index 0000000000..0c0f9b8d8d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/avfft.h @@ -0,0 +1,118 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_AVFFT_H +#define AVCODEC_AVFFT_H + +/** + * @file + * @ingroup lavc_fft + * FFT functions + */ + +/** + * @defgroup lavc_fft FFT functions + * @ingroup lavc_misc + * + * @{ + */ + +typedef float FFTSample; + +typedef struct FFTComplex { + FFTSample re, im; +} FFTComplex; + +typedef struct FFTContext FFTContext; + +/** + * Set up a complex FFT. + * @param nbits log2 of the length of the input array + * @param inverse if 0 perform the forward transform, if 1 perform the inverse + */ +FFTContext *av_fft_init(int nbits, int inverse); + +/** + * Do the permutation needed BEFORE calling ff_fft_calc(). + */ +void av_fft_permute(FFTContext *s, FFTComplex *z); + +/** + * Do a complex FFT with the parameters defined in av_fft_init(). The + * input data must be permuted before. No 1.0/sqrt(n) normalization is done. + */ +void av_fft_calc(FFTContext *s, FFTComplex *z); + +void av_fft_end(FFTContext *s); + +FFTContext *av_mdct_init(int nbits, int inverse, double scale); +void av_imdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_imdct_half(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_calc(FFTContext *s, FFTSample *output, const FFTSample *input); +void av_mdct_end(FFTContext *s); + +/* Real Discrete Fourier Transform */ + +enum RDFTransformType { + DFT_R2C, + IDFT_C2R, + IDFT_R2C, + DFT_C2R, +}; + +typedef struct RDFTContext RDFTContext; + +/** + * Set up a real FFT. 
+ * @param nbits log2 of the length of the input array + * @param trans the type of transform + */ +RDFTContext *av_rdft_init(int nbits, enum RDFTransformType trans); +void av_rdft_calc(RDFTContext *s, FFTSample *data); +void av_rdft_end(RDFTContext *s); + +/* Discrete Cosine Transform */ + +typedef struct DCTContext DCTContext; + +enum DCTTransformType { + DCT_II = 0, + DCT_III, + DCT_I, + DST_I, +}; + +/** + * Set up DCT. + * + * @param nbits size of the input array: + * (1 << nbits) for DCT-II, DCT-III and DST-I + * (1 << nbits) + 1 for DCT-I + * @param type the type of transform + * + * @note the first element of the input of DST-I is ignored + */ +DCTContext *av_dct_init(int nbits, enum DCTTransformType type); +void av_dct_calc(DCTContext *s, FFTSample *data); +void av_dct_end (DCTContext *s); + +/** + * @} + */ + +#endif /* AVCODEC_AVFFT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/d3d11va.h b/third-party/FFmpeg-iOS/include/libavcodec/d3d11va.h new file mode 100644 index 0000000000..6816b6c1e6 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/d3d11va.h @@ -0,0 +1,112 @@ +/* + * Direct3D11 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * copyright (c) 2015 Steve Lhomme + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_D3D11VA_H +#define AVCODEC_D3D11VA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_d3d11va + * Public libavcodec D3D11VA header. + */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_d3d11va Direct3D11 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for Direct3D11 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for Direct3D11 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the Direct3D11 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + * + * Use av_d3d11va_alloc_context() exclusively to allocate an AVD3D11VAContext. + */ +typedef struct AVD3D11VAContext { + /** + * D3D11 decoder object + */ + ID3D11VideoDecoder *decoder; + + /** + * D3D11 VideoContext + */ + ID3D11VideoContext *video_context; + + /** + * D3D11 configuration used to create the decoder + */ + D3D11_VIDEO_DECODER_CONFIG *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + ID3D11VideoDecoderOutputView **surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; + + /** + * Mutex to access video_context + */ + HANDLE context_mutex; +} AVD3D11VAContext; + +/** + * Allocate an AVD3D11VAContext. 
+ * + * @return Newly-allocated AVD3D11VAContext or NULL on failure. + */ +AVD3D11VAContext *av_d3d11va_alloc_context(void); + +/** + * @} + */ + +#endif /* AVCODEC_D3D11VA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/dirac.h b/third-party/FFmpeg-iOS/include/libavcodec/dirac.h new file mode 100644 index 0000000000..e6d9d346d9 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/dirac.h @@ -0,0 +1,131 @@ +/* + * Copyright (C) 2007 Marco Gerards + * Copyright (C) 2009 David Conrad + * Copyright (C) 2011 Jordi Ortiz + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DIRAC_H +#define AVCODEC_DIRAC_H + +/** + * @file + * Interface to Dirac Decoder/Encoder + * @author Marco Gerards + * @author David Conrad + * @author Jordi Ortiz + */ + +#include "avcodec.h" + +/** + * The spec limits the number of wavelet decompositions to 4 for both + * level 1 (VC-2) and 128 (long-gop default). + * 5 decompositions is the maximum before >16-bit buffers are needed. + * Schroedinger allows this for DD 9,7 and 13,7 wavelets only, limiting + * the others to 4 decompositions (or 3 for the fidelity filter). + * + * We use this instead of MAX_DECOMPOSITIONS to save some memory. 
+ */ +#define MAX_DWT_LEVELS 5 + +/** + * Parse code values: + * + * Dirac Specification -> + * 9.6.1 Table 9.1 + * + * VC-2 Specification -> + * 10.4.1 Table 10.1 + */ + +enum DiracParseCodes { + DIRAC_PCODE_SEQ_HEADER = 0x00, + DIRAC_PCODE_END_SEQ = 0x10, + DIRAC_PCODE_AUX = 0x20, + DIRAC_PCODE_PAD = 0x30, + DIRAC_PCODE_PICTURE_CODED = 0x08, + DIRAC_PCODE_PICTURE_RAW = 0x48, + DIRAC_PCODE_PICTURE_LOW_DEL = 0xC8, + DIRAC_PCODE_PICTURE_HQ = 0xE8, + DIRAC_PCODE_INTER_NOREF_CO1 = 0x0A, + DIRAC_PCODE_INTER_NOREF_CO2 = 0x09, + DIRAC_PCODE_INTER_REF_CO1 = 0x0D, + DIRAC_PCODE_INTER_REF_CO2 = 0x0E, + DIRAC_PCODE_INTRA_REF_CO = 0x0C, + DIRAC_PCODE_INTRA_REF_RAW = 0x4C, + DIRAC_PCODE_INTRA_REF_PICT = 0xCC, + DIRAC_PCODE_MAGIC = 0x42424344, +}; + +typedef struct DiracVersionInfo { + int major; + int minor; +} DiracVersionInfo; + +typedef struct AVDiracSeqHeader { + unsigned width; + unsigned height; + uint8_t chroma_format; ///< 0: 444 1: 422 2: 420 + + uint8_t interlaced; + uint8_t top_field_first; + + uint8_t frame_rate_index; ///< index into dirac_frame_rate[] + uint8_t aspect_ratio_index; ///< index into dirac_aspect_ratio[] + + uint16_t clean_width; + uint16_t clean_height; + uint16_t clean_left_offset; + uint16_t clean_right_offset; + + uint8_t pixel_range_index; ///< index into dirac_pixel_range_presets[] + uint8_t color_spec_index; ///< index into dirac_color_spec_presets[] + + int profile; + int level; + + AVRational framerate; + AVRational sample_aspect_ratio; + + enum AVPixelFormat pix_fmt; + enum AVColorRange color_range; + enum AVColorPrimaries color_primaries; + enum AVColorTransferCharacteristic color_trc; + enum AVColorSpace colorspace; + + DiracVersionInfo version; + int bit_depth; +} AVDiracSeqHeader; + +/** + * Parse a Dirac sequence header. + * + * @param dsh this function will allocate and fill an AVDiracSeqHeader struct + * and write it into this pointer. The caller must free it with + * av_free(). 
+ * @param buf the data buffer + * @param buf_size the size of the data buffer in bytes + * @param log_ctx if non-NULL, this function will log errors here + * @return 0 on success, a negative AVERROR code on failure + */ +int av_dirac_parse_sequence_header(AVDiracSeqHeader **dsh, + const uint8_t *buf, size_t buf_size, + void *log_ctx); + +#endif /* AVCODEC_DIRAC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h b/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h new file mode 100644 index 0000000000..121cccfb03 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/dv_profile.h @@ -0,0 +1,83 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DV_PROFILE_H +#define AVCODEC_DV_PROFILE_H + +#include + +#include "../libavutilpixfmt.h" +#include "../libavutilrational.h" +#include "avcodec.h" + +/* minimum number of bytes to read from a DV stream in order to + * determine the profile */ +#define DV_PROFILE_BYTES (6 * 80) /* 6 DIF blocks */ + + +/* + * AVDVProfile is used to express the differences between various + * DV flavors. For now it's primarily used for differentiating + * 525/60 and 625/50, but the plans are to use it for various + * DV specs as well (e.g. SMPTE314M vs. IEC 61834). 
+ */ +typedef struct AVDVProfile { + int dsf; /* value of the dsf in the DV header */ + int video_stype; /* stype for VAUX source pack */ + int frame_size; /* total size of one frame in bytes */ + int difseg_size; /* number of DIF segments per DIF channel */ + int n_difchan; /* number of DIF channels per frame */ + AVRational time_base; /* 1/framerate */ + int ltc_divisor; /* FPS from the LTS standpoint */ + int height; /* picture height in pixels */ + int width; /* picture width in pixels */ + AVRational sar[2]; /* sample aspect ratios for 4:3 and 16:9 */ + enum AVPixelFormat pix_fmt; /* picture pixel format */ + int bpm; /* blocks per macroblock */ + const uint8_t *block_sizes; /* AC block sizes, in bits */ + int audio_stride; /* size of audio_shuffle table */ + int audio_min_samples[3]; /* min amount of audio samples */ + /* for 48kHz, 44.1kHz and 32kHz */ + int audio_samples_dist[5]; /* how many samples are supposed to be */ + /* in each frame in a 5 frames window */ + const uint8_t (*audio_shuffle)[9]; /* PCM shuffling table */ +} AVDVProfile; + +/** + * Get a DV profile for the provided compressed frame. + * + * @param sys the profile used for the previous frame, may be NULL + * @param frame the compressed data buffer + * @param buf_size size of the buffer in bytes + * @return the DV profile for the supplied data or NULL on failure + */ +const AVDVProfile *av_dv_frame_profile(const AVDVProfile *sys, + const uint8_t *frame, unsigned buf_size); + +/** + * Get a DV profile for the provided stream parameters. + */ +const AVDVProfile *av_dv_codec_profile(int width, int height, enum AVPixelFormat pix_fmt); + +/** + * Get a DV profile for the provided stream parameters. + * The frame rate is used as a best-effort parameter. 
+ */ +const AVDVProfile *av_dv_codec_profile2(int width, int height, enum AVPixelFormat pix_fmt, AVRational frame_rate); + +#endif /* AVCODEC_DV_PROFILE_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/dxva2.h b/third-party/FFmpeg-iOS/include/libavcodec/dxva2.h new file mode 100644 index 0000000000..22c93992f2 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/dxva2.h @@ -0,0 +1,93 @@ +/* + * DXVA2 HW acceleration + * + * copyright (c) 2009 Laurent Aimar + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_DXVA2_H +#define AVCODEC_DXVA2_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_dxva2 + * Public libavcodec DXVA2 header. 
+ */ + +#if !defined(_WIN32_WINNT) || _WIN32_WINNT < 0x0602 +#undef _WIN32_WINNT +#define _WIN32_WINNT 0x0602 +#endif + +#include +#include +#include + +/** + * @defgroup lavc_codec_hwaccel_dxva2 DXVA2 + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define FF_DXVA2_WORKAROUND_SCALING_LIST_ZIGZAG 1 ///< Work around for DXVA2 and old UVD/UVD+ ATI video cards +#define FF_DXVA2_WORKAROUND_INTEL_CLEARVIDEO 2 ///< Work around for DXVA2 and old Intel GPUs with ClearVideo interface + +/** + * This structure is used to provides the necessary configurations and data + * to the DXVA2 FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct dxva_context { + /** + * DXVA2 decoder object + */ + IDirectXVideoDecoder *decoder; + + /** + * DXVA2 configuration used to create the decoder + */ + const DXVA2_ConfigPictureDecode *cfg; + + /** + * The number of surface in the surface array + */ + unsigned surface_count; + + /** + * The array of Direct3D surfaces used to create the decoder + */ + LPDIRECT3DSURFACE9 *surface; + + /** + * A bit field configuring the workarounds needed for using the decoder + */ + uint64_t workaround; + + /** + * Private to the FFmpeg AVHWAccel implementation + */ + unsigned report_id; +}; + +/** + * @} + */ + +#endif /* AVCODEC_DXVA2_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/jni.h b/third-party/FFmpeg-iOS/include/libavcodec/jni.h new file mode 100644 index 0000000000..dd99e92611 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/jni.h @@ -0,0 +1,46 @@ +/* + * JNI public API functions + * + * Copyright (c) 2015-2016 Matthieu Bouron + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_JNI_H +#define AVCODEC_JNI_H + +/* + * Manually set a Java virtual machine which will be used to retrieve the JNI + * environment. Once a Java VM is set it cannot be changed afterwards, meaning + * you can call multiple times av_jni_set_java_vm with the same Java VM pointer + * however it will error out if you try to set a different Java VM. + * + * @param vm Java virtual machine + * @param log_ctx context used for logging, can be NULL + * @return 0 on success, < 0 otherwise + */ +int av_jni_set_java_vm(void *vm, void *log_ctx); + +/* + * Get the Java virtual machine which has been set with av_jni_set_java_vm. + * + * @param vm Java virtual machine + * @return a pointer to the Java virtual machine + */ +void *av_jni_get_java_vm(void *log_ctx); + +#endif /* AVCODEC_JNI_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/qsv.h b/third-party/FFmpeg-iOS/include/libavcodec/qsv.h new file mode 100644 index 0000000000..1fe9a54bed --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/qsv.h @@ -0,0 +1,107 @@ +/* + * Intel MediaSDK QSV public API + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_QSV_H +#define AVCODEC_QSV_H + +#include + +#include "../libavutilbuffer.h" + +/** + * This struct is used for communicating QSV parameters between libavcodec and + * the caller. It is managed by the caller and must be assigned to + * AVCodecContext.hwaccel_context. + * - decoding: hwaccel_context must be set on return from the get_format() + * callback + * - encoding: hwaccel_context must be set before avcodec_open2() + */ +typedef struct AVQSVContext { + /** + * If non-NULL, the session to use for encoding or decoding. + * Otherwise, libavcodec will try to create an internal session. + */ + mfxSession session; + + /** + * The IO pattern to use. + */ + int iopattern; + + /** + * Extra buffers to pass to encoder or decoder initialization. + */ + mfxExtBuffer **ext_buffers; + int nb_ext_buffers; + + /** + * Encoding only. If this field is set to non-zero by the caller, libavcodec + * will create an mfxExtOpaqueSurfaceAlloc extended buffer and pass it to + * the encoder initialization. This only makes sense if iopattern is also + * set to MFX_IOPATTERN_IN_OPAQUE_MEMORY. + * + * The number of allocated opaque surfaces will be the sum of the number + * required by the encoder and the user-provided value nb_opaque_surfaces. + * The array of the opaque surfaces will be exported to the caller through + * the opaque_surfaces field. + */ + int opaque_alloc; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. 
Before + * calling avcodec_open2(), the caller should set this field to the number + * of extra opaque surfaces to allocate beyond what is required by the + * encoder. + * + * On return from avcodec_open2(), this field will be set by libavcodec to + * the total number of allocated opaque surfaces. + */ + int nb_opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be used by libavcodec to export the + * array of the allocated opaque surfaces to the caller, so they can be + * passed to other parts of the pipeline. + * + * The buffer reference exported here is owned and managed by libavcodec, + * the callers should make their own reference with av_buffer_ref() and free + * it with av_buffer_unref() when it is no longer needed. + * + * The buffer data is an nb_opaque_surfaces-sized array of mfxFrameSurface1. + */ + AVBufferRef *opaque_surfaces; + + /** + * Encoding only, and only if opaque_alloc is set to non-zero. On return + * from avcodec_open2(), this field will be set to the surface type used in + * the opaque allocation request. + */ + int opaque_alloc_type; +} AVQSVContext; + +/** + * Allocate a new context. + * + * It must be freed by the caller with av_free(). + */ +AVQSVContext *av_qsv_alloc_context(void); + +#endif /* AVCODEC_QSV_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h b/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h new file mode 100644 index 0000000000..1a64817b51 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/vaapi.h @@ -0,0 +1,189 @@ +/* + * Video Acceleration API (shared data between FFmpeg and the video player) + * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1 + * + * Copyright (C) 2008-2009 Splitted-Desktop Systems + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VAAPI_H +#define AVCODEC_VAAPI_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vaapi + * Public libavcodec VA API header. + */ + +#include +#include "../libavutilattributes.h" +#include "version.h" + +/** + * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding + * @ingroup lavc_codec_hwaccel + * @{ + */ + +/** + * This structure is used to share data between the FFmpeg library and + * the client video application. + * This shall be zero-allocated and available as + * AVCodecContext.hwaccel_context. All user members can be set once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. 
+ */ +struct vaapi_context { + /** + * Window system dependent data + * + * - encoding: unused + * - decoding: Set by user + */ + void *display; + + /** + * Configuration ID + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t config_id; + + /** + * Context ID (video decode pipeline) + * + * - encoding: unused + * - decoding: Set by user + */ + uint32_t context_id; + +#if FF_API_VAAPI_CONTEXT + /** + * VAPictureParameterBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t pic_param_buf_id; + + /** + * VAIQMatrixBuffer ID + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t iq_matrix_buf_id; + + /** + * VABitPlaneBuffer ID (for VC-1 decoding) + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t bitplane_buf_id; + + /** + * Slice parameter/data buffer IDs + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t *slice_buf_ids; + + /** + * Number of effective slice buffer IDs to send to the HW + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int n_slice_buf_ids; + + /** + * Size of pre-allocated slice_buf_ids + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_buf_ids_alloc; + + /** + * Pointer to VASliceParameterBuffers + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + void *slice_params; + + /** + * Size of a VASliceParameterBuffer element + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_param_size; + + /** + * Size of pre-allocated slice_params + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_params_alloc; + + /** + * Number of slices currently filled in + * + * - encoding: unused + * - 
decoding: Set by libavcodec + */ + attribute_deprecated + unsigned int slice_count; + + /** + * Pointer to slice data buffer base + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + const uint8_t *slice_data; + + /** + * Current size of slice data + * + * - encoding: unused + * - decoding: Set by libavcodec + */ + attribute_deprecated + uint32_t slice_data_size; +#endif +}; + +/* @} */ + +#endif /* AVCODEC_VAAPI_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vda.h b/third-party/FFmpeg-iOS/include/libavcodec/vda.h new file mode 100644 index 0000000000..cb840b0021 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/vda.h @@ -0,0 +1,230 @@ +/* + * VDA HW acceleration + * + * copyright (c) 2011 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDA_H +#define AVCODEC_VDA_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vda + * Public libavcodec VDA header. 
+ */ + +#include "../libavcodecavcodec.h" + +#include + +// emmintrin.h is unable to compile with -std=c99 -Werror=missing-prototypes +// http://openradar.appspot.com/8026390 +#undef __GNUC_STDC_INLINE__ + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "../libavcodecversion.h" + +// extra flags not defined in VDADecoder.h +enum { + kVDADecodeInfo_Asynchronous = 1UL << 0, + kVDADecodeInfo_FrameDropped = 1UL << 1 +}; + +/** + * @defgroup lavc_codec_hwaccel_vda VDA + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +/** + * This structure is used to provide the necessary configurations and data + * to the VDA FFmpeg HWAccel implementation. + * + * The application must make it available as AVCodecContext.hwaccel_context. + */ +struct vda_context { + /** + * VDA decoder object. + * + * - encoding: unused + * - decoding: Set/Unset by libavcodec. + */ + VDADecoder decoder; + + /** + * The Core Video pixel buffer that contains the current image data. + * + * encoding: unused + * decoding: Set by libavcodec. Unset by user. + */ + CVPixelBufferRef cv_buffer; + + /** + * Use the hardware decoder in synchronous mode. + * + * encoding: unused + * decoding: Set by user. + */ + int use_sync_decoding; + + /** + * The frame width. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int width; + + /** + * The frame height. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int height; + + /** + * The frame format. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + int format; + + /** + * The pixel format for output image buffers. + * + * - encoding: unused + * - decoding: Set/Unset by user. + */ + OSType cv_pix_fmt_type; + + /** + * unused + */ + uint8_t *priv_bitstream; + + /** + * unused + */ + int priv_bitstream_size; + + /** + * unused + */ + int priv_allocated_size; + + /** + * Use av_buffer to manage buffer. 
+ * When the flag is set, the CVPixelBuffers returned by the decoder will + * be released automatically, so you have to retain them if necessary. + * Not setting this flag may cause memory leak. + * + * encoding: unused + * decoding: Set by user. + */ + int use_ref_buffer; +}; + +/** Create the video decoder. */ +int ff_vda_create_decoder(struct vda_context *vda_ctx, + uint8_t *extradata, + int extradata_size); + +/** Destroy the video decoder. */ +int ff_vda_destroy_decoder(struct vda_context *vda_ctx); + +/** + * This struct holds all the information that needs to be passed + * between the caller and libavcodec for initializing VDA decoding. + * Its size is not a part of the public ABI, it must be allocated with + * av_vda_alloc_context() and freed with av_free(). + */ +typedef struct AVVDAContext { + /** + * VDA decoder object. Created and freed by the caller. + */ + VDADecoder decoder; + + /** + * The output callback that must be passed to VDADecoderCreate. + * Set by av_vda_alloc_context(). + */ + VDADecoderOutputCallback output_callback; + + /** + * CVPixelBuffer Format Type that VDA will use for decoded frames; set by + * the caller. + */ + OSType cv_pix_fmt_type; +} AVVDAContext; + +/** + * Allocate and initialize a VDA context. + * + * This function should be called from the get_format() callback when the caller + * selects the AV_PIX_FMT_VDA format. The caller must then create the decoder + * object (using the output callback provided by libavcodec) that will be used + * for VDA-accelerated decoding. + * + * When decoding with VDA is finished, the caller must destroy the decoder + * object and free the VDA context using av_free(). + * + * @return the newly allocated context or NULL on failure + */ +AVVDAContext *av_vda_alloc_context(void); + +/** + * This is a convenience function that creates and sets up the VDA context using + * an internal implementation. 
+ * + * @param avctx the corresponding codec context + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_vda_default_init(AVCodecContext *avctx); + +/** + * This is a convenience function that creates and sets up the VDA context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * @param vdactx the VDA context to use + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_vda_default_init2(AVCodecContext *avctx, AVVDAContext *vdactx); + +/** + * This function must be called to free the VDA context initialized with + * av_vda_default_init(). + * + * @param avctx the corresponding codec context + */ +void av_vda_default_free(AVCodecContext *avctx); + +/** + * @} + */ + +#endif /* AVCODEC_VDA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h b/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h new file mode 100644 index 0000000000..5f3dbdc328 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/vdpau.h @@ -0,0 +1,253 @@ +/* + * The Video Decode and Presentation API for UNIX (VDPAU) is used for + * hardware-accelerated decoding of MPEG-1/2, H.264 and VC-1. + * + * Copyright (C) 2008 NVIDIA + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VDPAU_H +#define AVCODEC_VDPAU_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_vdpau + * Public libavcodec VDPAU header. + */ + + +/** + * @defgroup lavc_codec_hwaccel_vdpau VDPAU Decoder and Renderer + * @ingroup lavc_codec_hwaccel + * + * VDPAU hardware acceleration has two modules + * - VDPAU decoding + * - VDPAU presentation + * + * The VDPAU decoding module parses all headers using FFmpeg + * parsing mechanisms and uses VDPAU for the actual decoding. + * + * As per the current implementation, the actual decoding + * and rendering (API calls) are done as part of the VDPAU + * presentation (vo_vdpau.c) module. + * + * @{ + */ + +#include +#include +#include "../libavutilavconfig.h" +#include "../libavutilattributes.h" + +#include "avcodec.h" +#include "version.h" + +#if FF_API_BUFS_VDPAU +union AVVDPAUPictureInfo { + VdpPictureInfoH264 h264; + VdpPictureInfoMPEG1Or2 mpeg; + VdpPictureInfoVC1 vc1; + VdpPictureInfoMPEG4Part2 mpeg4; +}; +#endif + +struct AVCodecContext; +struct AVFrame; + +typedef int (*AVVDPAU_Render2)(struct AVCodecContext *, struct AVFrame *, + const VdpPictureInfo *, uint32_t, + const VdpBitstreamBuffer *); + +/** + * This structure is used to share data between the libavcodec library and + * the client video application. + * The user shall allocate the structure via the av_alloc_vdpau_hwaccel + * function and make it available as + * AVCodecContext.hwaccel_context. Members can be set by the user once + * during initialization or through each AVCodecContext.get_buffer() + * function call. In any case, they must be valid prior to calling + * decoding functions. + * + * The size of this structure is not a part of the public ABI and must not + * be used outside of libavcodec. 
Use av_vdpau_alloc_context() to allocate an + * AVVDPAUContext. + */ +typedef struct AVVDPAUContext { + /** + * VDPAU decoder handle + * + * Set by user. + */ + VdpDecoder decoder; + + /** + * VDPAU decoder render callback + * + * Set by the user. + */ + VdpDecoderRender *render; + +#if FF_API_BUFS_VDPAU + /** + * VDPAU picture information + * + * Set by libavcodec. + */ + attribute_deprecated + union AVVDPAUPictureInfo info; + + /** + * Allocated size of the bitstream_buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_allocated; + + /** + * Useful bitstream buffers in the bitstream buffers table. + * + * Set by libavcodec. + */ + attribute_deprecated + int bitstream_buffers_used; + + /** + * Table of bitstream buffers. + * The user is responsible for freeing this buffer using av_freep(). + * + * Set by libavcodec. + */ + attribute_deprecated + VdpBitstreamBuffer *bitstream_buffers; +#endif + AVVDPAU_Render2 render2; +} AVVDPAUContext; + +/** + * @brief allocation function for AVVDPAUContext + * + * Allows extending the struct without breaking API/ABI + */ +AVVDPAUContext *av_alloc_vdpaucontext(void); + +AVVDPAU_Render2 av_vdpau_hwaccel_get_render2(const AVVDPAUContext *); +void av_vdpau_hwaccel_set_render2(AVVDPAUContext *, AVVDPAU_Render2); + +/** + * Associate a VDPAU device with a codec context for hardware acceleration. + * This function is meant to be called from the get_format() codec callback, + * or earlier. It can also be called after avcodec_flush_buffers() to change + * the underlying VDPAU device mid-stream (e.g. to recover from non-transparent + * display preemption). + * + * @note get_format() must return AV_PIX_FMT_VDPAU if this function completes + * successfully. 
+ * + * @param avctx decoding context whose get_format() callback is invoked + * @param device VDPAU device handle to use for hardware acceleration + * @param get_proc_address VDPAU device driver + * @param flags zero of more OR'd AV_HWACCEL_FLAG_* flags + * + * @return 0 on success, an AVERROR code on failure. + */ +int av_vdpau_bind_context(AVCodecContext *avctx, VdpDevice device, + VdpGetProcAddress *get_proc_address, unsigned flags); + +/** + * Gets the parameters to create an adequate VDPAU video surface for the codec + * context using VDPAU hardware decoding acceleration. + * + * @note Behavior is undefined if the context was not successfully bound to a + * VDPAU device using av_vdpau_bind_context(). + * + * @param avctx the codec context being used for decoding the stream + * @param type storage space for the VDPAU video surface chroma type + * (or NULL to ignore) + * @param width storage space for the VDPAU video surface pixel width + * (or NULL to ignore) + * @param height storage space for the VDPAU video surface pixel height + * (or NULL to ignore) + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_vdpau_get_surface_parameters(AVCodecContext *avctx, VdpChromaType *type, + uint32_t *width, uint32_t *height); + +/** + * Allocate an AVVDPAUContext. + * + * @return Newly-allocated AVVDPAUContext or NULL on failure. + */ +AVVDPAUContext *av_vdpau_alloc_context(void); + +#if FF_API_VDPAU_PROFILE +/** + * Get a decoder profile that should be used for initializing a VDPAU decoder. + * Should be called from the AVCodecContext.get_format() callback. + * + * @deprecated Use av_vdpau_bind_context() instead. + * + * @param avctx the codec context being used for decoding the stream + * @param profile a pointer into which the result will be written on success. + * The contents of profile are undefined if this function returns + * an error. + * + * @return 0 on success (non-negative), a negative AVERROR on failure. 
+ */ +attribute_deprecated +int av_vdpau_get_profile(AVCodecContext *avctx, VdpDecoderProfile *profile); +#endif + +#if FF_API_CAP_VDPAU +/** @brief The videoSurface is used for rendering. */ +#define FF_VDPAU_STATE_USED_FOR_RENDER 1 + +/** + * @brief The videoSurface is needed for reference/prediction. + * The codec manipulates this. + */ +#define FF_VDPAU_STATE_USED_FOR_REFERENCE 2 + +/** + * @brief This structure is used as a callback between the FFmpeg + * decoder (vd_) and presentation (vo_) module. + * This is used for defining a video frame containing surface, + * picture parameter, bitstream information etc which are passed + * between the FFmpeg decoder and its clients. + */ +struct vdpau_render_state { + VdpVideoSurface surface; ///< Used as rendered surface, never changed. + + int state; ///< Holds FF_VDPAU_STATE_* values. + + /** picture parameter information for all supported codecs */ + union AVVDPAUPictureInfo info; + + /** Describe size/location of the compressed video data. + Set to 0 when freeing bitstream_buffers. */ + int bitstream_buffers_allocated; + int bitstream_buffers_used; + /** The user is responsible for freeing this buffer using av_freep(). */ + VdpBitstreamBuffer *bitstream_buffers; +}; +#endif + +/* @}*/ + +#endif /* AVCODEC_VDPAU_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/version.h b/third-party/FFmpeg-iOS/include/libavcodec/version.h new file mode 100644 index 0000000000..d57d3cb6ed --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/version.h @@ -0,0 +1,230 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VERSION_H +#define AVCODEC_VERSION_H + +/** + * @file + * @ingroup libavc + * Libavcodec version macros. + */ + +#include "../libavutil/version.h" + +#define LIBAVCODEC_VERSION_MAJOR 57 +#define LIBAVCODEC_VERSION_MINOR 48 +#define LIBAVCODEC_VERSION_MICRO 101 + +#define LIBAVCODEC_VERSION_INT AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_VERSION AV_VERSION(LIBAVCODEC_VERSION_MAJOR, \ + LIBAVCODEC_VERSION_MINOR, \ + LIBAVCODEC_VERSION_MICRO) +#define LIBAVCODEC_BUILD LIBAVCODEC_VERSION_INT + +#define LIBAVCODEC_IDENT "Lavc" AV_STRINGIFY(LIBAVCODEC_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ */ + +#ifndef FF_API_VIMA_DECODER +#define FF_API_VIMA_DECODER (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AUDIO_CONVERT +#define FF_API_AUDIO_CONVERT (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AVCODEC_RESAMPLE +#define FF_API_AVCODEC_RESAMPLE FF_API_AUDIO_CONVERT +#endif +#ifndef FF_API_GETCHROMA +#define FF_API_GETCHROMA (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MISSING_SAMPLE +#define FF_API_MISSING_SAMPLE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LOWRES +#define FF_API_LOWRES (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_CAP_VDPAU +#define FF_API_CAP_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_BUFS_VDPAU +#define FF_API_BUFS_VDPAU (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_VOXWARE +#define FF_API_VOXWARE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_SET_DIMENSIONS +#define FF_API_SET_DIMENSIONS (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_DEBUG_MV +#define FF_API_DEBUG_MV (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AC_VLC +#define FF_API_AC_VLC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_OLD_MSMPEG4 +#define FF_API_OLD_MSMPEG4 (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ASPECT_EXTENDED +#define FF_API_ASPECT_EXTENDED (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ARCH_ALPHA +#define FF_API_ARCH_ALPHA (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_XVMC +#define FF_API_XVMC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ERROR_RATE +#define FF_API_ERROR_RATE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_QSCALE_TYPE +#define FF_API_QSCALE_TYPE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MB_TYPE +#define FF_API_MB_TYPE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MAX_BFRAMES +#define FF_API_MAX_BFRAMES (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_NEG_LINESIZES +#define FF_API_NEG_LINESIZES (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef 
FF_API_EMU_EDGE +#define FF_API_EMU_EDGE (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ARCH_SH4 +#define FF_API_ARCH_SH4 (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_ARCH_SPARC +#define FF_API_ARCH_SPARC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_UNUSED_MEMBERS +#define FF_API_UNUSED_MEMBERS (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_IDCT_XVIDMMX +#define FF_API_IDCT_XVIDMMX (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_INPUT_PRESERVED +#define FF_API_INPUT_PRESERVED (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_NORMALIZE_AQP +#define FF_API_NORMALIZE_AQP (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_GMC +#define FF_API_GMC (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_MV0 +#define FF_API_MV0 (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_CODEC_NAME +#define FF_API_CODEC_NAME (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AFD +#define FF_API_AFD (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_VISMV +/* XXX: don't forget to drop the -vismv documentation */ +#define FF_API_VISMV (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AUDIOENC_DELAY +#define FF_API_AUDIOENC_DELAY (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_VAAPI_CONTEXT +#define FF_API_VAAPI_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_AVCTX_TIMEBASE +#define FF_API_AVCTX_TIMEBASE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MPV_OPT +#define FF_API_MPV_OPT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STREAM_CODEC_TAG +#define FF_API_STREAM_CODEC_TAG (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_QUANT_BIAS +#define FF_API_QUANT_BIAS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_RC_STRATEGY +#define FF_API_RC_STRATEGY (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODED_FRAME +#define FF_API_CODED_FRAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_MOTION_EST +#define FF_API_MOTION_EST 
(LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_WITHOUT_PREFIX +#define FF_API_WITHOUT_PREFIX (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_SIDEDATA_ONLY_PKT +#define FF_API_SIDEDATA_ONLY_PKT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VDPAU_PROFILE +#define FF_API_VDPAU_PROFILE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CONVERGENCE_DURATION +#define FF_API_CONVERGENCE_DURATION (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPICTURE +#define FF_API_AVPICTURE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_AVPACKET_OLD_API +#define FF_API_AVPACKET_OLD_API (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_RTP_CALLBACK +#define FF_API_RTP_CALLBACK (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_VBV_DELAY +#define FF_API_VBV_DELAY (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_CODER_TYPE +#define FF_API_CODER_TYPE (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_STAT_BITS +#define FF_API_STAT_BITS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_PRIVATE_OPT +#define FF_API_PRIVATE_OPT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_ASS_TIMING +#define FF_API_ASS_TIMING (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_OLD_BSF +#define FF_API_OLD_BSF (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_COPY_CONTEXT +#define FF_API_COPY_CONTEXT (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_GET_CONTEXT_DEFAULTS +#define FF_API_GET_CONTEXT_DEFAULTS (LIBAVCODEC_VERSION_MAJOR < 59) +#endif +#ifndef FF_API_NVENC_OLD_NAME +#define FF_API_NVENC_OLD_NAME (LIBAVCODEC_VERSION_MAJOR < 59) +#endif + +#endif /* AVCODEC_VERSION_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h b/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h new file mode 100644 index 0000000000..f3ffa8754d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/videotoolbox.h @@ -0,0 +1,126 @@ +/* + * Videotoolbox hardware acceleration + * + * copyright 
(c) 2012 Sebastien Zwickert + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_VIDEOTOOLBOX_H +#define AVCODEC_VIDEOTOOLBOX_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_videotoolbox + * Public libavcodec Videotoolbox header. + */ + +#include + +#define Picture QuickdrawPicture +#include +#undef Picture + +#include "../libavcodecavcodec.h" + +/** + * This struct holds all the information that needs to be passed + * between the caller and libavcodec for initializing Videotoolbox decoding. + * Its size is not a part of the public ABI, it must be allocated with + * av_videotoolbox_alloc_context() and freed with av_free(). + */ +typedef struct AVVideotoolboxContext { + /** + * Videotoolbox decompression session object. + * Created and freed the caller. + */ + VTDecompressionSessionRef session; + + /** + * The output callback that must be passed to the session. + * Set by av_videottoolbox_default_init() + */ + VTDecompressionOutputCallback output_callback; + + /** + * CVPixelBuffer Format Type that Videotoolbox will use for decoded frames. + * set by the caller. + */ + OSType cv_pix_fmt_type; + + /** + * CoreMedia Format Description that Videotoolbox will use to create the decompression session. + * Set by the caller. 
+ */ + CMVideoFormatDescriptionRef cm_fmt_desc; + + /** + * CoreMedia codec type that Videotoolbox will use to create the decompression session. + * Set by the caller. + */ + int cm_codec_type; +} AVVideotoolboxContext; + +/** + * Allocate and initialize a Videotoolbox context. + * + * This function should be called from the get_format() callback when the caller + * selects the AV_PIX_FMT_VIDETOOLBOX format. The caller must then create + * the decoder object (using the output callback provided by libavcodec) that + * will be used for Videotoolbox-accelerated decoding. + * + * When decoding with Videotoolbox is finished, the caller must destroy the decoder + * object and free the Videotoolbox context using av_free(). + * + * @return the newly allocated context or NULL on failure + */ +AVVideotoolboxContext *av_videotoolbox_alloc_context(void); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_videotoolbox_default_init(AVCodecContext *avctx); + +/** + * This is a convenience function that creates and sets up the Videotoolbox context using + * an internal implementation. + * + * @param avctx the corresponding codec context + * @param vtctx the Videotoolbox context to use + * + * @return >= 0 on success, a negative AVERROR code on failure + */ +int av_videotoolbox_default_init2(AVCodecContext *avctx, AVVideotoolboxContext *vtctx); + +/** + * This function must be called to free the Videotoolbox context initialized with + * av_videotoolbox_default_init(). 
+ * + * @param avctx the corresponding codec context + */ +void av_videotoolbox_default_free(AVCodecContext *avctx); + +/** + * @} + */ + +#endif /* AVCODEC_VIDEOTOOLBOX_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h b/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h new file mode 100644 index 0000000000..92050277ed --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/vorbis_parser.h @@ -0,0 +1,77 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A public API for Vorbis parsing + * + * Determines the duration for each packet. + */ + +#ifndef AVCODEC_VORBIS_PARSER_H +#define AVCODEC_VORBIS_PARSER_H + +#include + +typedef struct AVVorbisParseContext AVVorbisParseContext; + +/** + * Allocate and initialize the Vorbis parser using headers in the extradata. + * + * @param avctx codec context + * @param s Vorbis parser context + */ +AVVorbisParseContext *av_vorbis_parse_init(const uint8_t *extradata, + int extradata_size); + +/** + * Free the parser and everything associated with it. 
+ */ +void av_vorbis_parse_free(AVVorbisParseContext **s); + +#define VORBIS_FLAG_HEADER 0x00000001 +#define VORBIS_FLAG_COMMENT 0x00000002 +#define VORBIS_FLAG_SETUP 0x00000004 + +/** + * Get the duration for a Vorbis packet. + * + * If @p flags is @c NULL, + * special frames are considered invalid. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + * @param flags flags for special frames + */ +int av_vorbis_parse_frame_flags(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size, int *flags); + +/** + * Get the duration for a Vorbis packet. + * + * @param s Vorbis parser context + * @param buf buffer containing a Vorbis frame + * @param buf_size size of the buffer + */ +int av_vorbis_parse_frame(AVVorbisParseContext *s, const uint8_t *buf, + int buf_size); + +void av_vorbis_parse_reset(AVVorbisParseContext *s); + +#endif /* AVCODEC_VORBIS_PARSER_H */ diff --git a/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h b/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h new file mode 100644 index 0000000000..5c0f7e5432 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavcodec/xvmc.h @@ -0,0 +1,170 @@ +/* + * Copyright (C) 2003 Ivan Kalvachev + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVCODEC_XVMC_H +#define AVCODEC_XVMC_H + +/** + * @file + * @ingroup lavc_codec_hwaccel_xvmc + * Public libavcodec XvMC header. + */ + +#include + +#include "../libavutilattributes.h" +#include "version.h" +#include "avcodec.h" + +/** + * @defgroup lavc_codec_hwaccel_xvmc XvMC + * @ingroup lavc_codec_hwaccel + * + * @{ + */ + +#define AV_XVMC_ID 0x1DC711C0 /**< special value to ensure that regular pixel routines haven't corrupted the struct + the number is 1337 speak for the letters IDCT MCo (motion compensation) */ + +struct attribute_deprecated xvmc_pix_fmt { + /** The field contains the special constant value AV_XVMC_ID. + It is used as a test that the application correctly uses the API, + and that there is no corruption caused by pixel routines. + - application - set during initialization + - libavcodec - unchanged + */ + int xvmc_id; + + /** Pointer to the block array allocated by XvMCCreateBlocks(). + The array has to be freed by XvMCDestroyBlocks(). + Each group of 64 values represents one data block of differential + pixel information (in MoCo mode) or coefficients for IDCT. + - application - set the pointer during initialization + - libavcodec - fills coefficients/pixel data into the array + */ + short* data_blocks; + + /** Pointer to the macroblock description array allocated by + XvMCCreateMacroBlocks() and freed by XvMCDestroyMacroBlocks(). + - application - set the pointer during initialization + - libavcodec - fills description data into the array + */ + XvMCMacroBlock* mv_blocks; + + /** Number of macroblock descriptions that can be stored in the mv_blocks + array. 
+ - application - set during initialization + - libavcodec - unchanged + */ + int allocated_mv_blocks; + + /** Number of blocks that can be stored at once in the data_blocks array. + - application - set during initialization + - libavcodec - unchanged + */ + int allocated_data_blocks; + + /** Indicate that the hardware would interpret data_blocks as IDCT + coefficients and perform IDCT on them. + - application - set during initialization + - libavcodec - unchanged + */ + int idct; + + /** In MoCo mode it indicates that intra macroblocks are assumed to be in + unsigned format; same as the XVMC_INTRA_UNSIGNED flag. + - application - set during initialization + - libavcodec - unchanged + */ + int unsigned_intra; + + /** Pointer to the surface allocated by XvMCCreateSurface(). + It has to be freed by XvMCDestroySurface() on application exit. + It identifies the frame and its state on the video hardware. + - application - set during initialization + - libavcodec - unchanged + */ + XvMCSurface* p_surface; + +/** Set by the decoder before calling ff_draw_horiz_band(), + needed by the XvMCRenderSurface function. */ +//@{ + /** Pointer to the surface used as past reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_past_surface; + + /** Pointer to the surface used as future reference + - application - unchanged + - libavcodec - set + */ + XvMCSurface* p_future_surface; + + /** top/bottom field or frame + - application - unchanged + - libavcodec - set + */ + unsigned int picture_structure; + + /** XVMC_SECOND_FIELD - 1st or 2nd field in the sequence + - application - unchanged + - libavcodec - set + */ + unsigned int flags; +//}@ + + /** Number of macroblock descriptions in the mv_blocks array + that have already been passed to the hardware. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may increment it + with filled_mb_block_num or zero both. 
+ - libavcodec - unchanged + */ + int start_mv_blocks_num; + + /** Number of new macroblock descriptions in the mv_blocks array (after + start_mv_blocks_num) that are filled by libavcodec and have to be + passed to the hardware. + - application - zeroes it on get_buffer() or after successful + ff_draw_horiz_band(). + - libavcodec - increment with one of each stored MB + */ + int filled_mv_blocks_num; + + /** Number of the next free data block; one data block consists of + 64 short values in the data_blocks array. + All blocks before this one have already been claimed by placing their + position into the corresponding block description structure field, + that are part of the mv_blocks array. + - application - zeroes it on get_buffer(). + A successful ff_draw_horiz_band() may zero it together + with start_mb_blocks_num. + - libavcodec - each decoded macroblock increases it by the number + of coded blocks it contains. + */ + int next_free_data_block_num; +}; + +/** + * @} + */ + +#endif /* AVCODEC_XVMC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavformat/avformat.h b/third-party/FFmpeg-iOS/include/libavformat/avformat.h new file mode 100644 index 0000000000..e21ba8858a --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavformat/avformat.h @@ -0,0 +1,2886 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_AVFORMAT_H +#define AVFORMAT_AVFORMAT_H + +/** + * @file + * @ingroup libavf + * Main libavformat public API header + */ + +/** + * @defgroup libavf I/O and Muxing/Demuxing Library + * @{ + * + * Libavformat (lavf) is a library for dealing with various media container + * formats. Its main two purposes are demuxing - i.e. splitting a media file + * into component streams, and the reverse process of muxing - writing supplied + * data in a specified container format. It also has an @ref lavf_io + * "I/O module" which supports a number of protocols for accessing the data (e.g. + * file, tcp, http and others). Before using lavf, you need to call + * av_register_all() to register all compiled muxers, demuxers and protocols. + * Unless you are absolutely sure you won't use libavformat's network + * capabilities, you should also call avformat_network_init(). + * + * A supported input format is described by an AVInputFormat struct, conversely + * an output format is described by AVOutputFormat. You can iterate over all + * registered input/output formats using the av_iformat_next() / + * av_oformat_next() functions. The protocols layer is not part of the public + * API, so you can only get the names of supported protocols with the + * avio_enum_protocols() function. + * + * Main lavf structure used for both muxing and demuxing is AVFormatContext, + * which exports all information about the file being read or written. As with + * most Libavformat structures, its size is not part of public ABI, so it cannot be + * allocated on stack or directly with av_malloc(). To create an + * AVFormatContext, use avformat_alloc_context() (some functions, like + * avformat_open_input() might do that for you). 
+ * + * Most importantly an AVFormatContext contains: + * @li the @ref AVFormatContext.iformat "input" or @ref AVFormatContext.oformat + * "output" format. It is either autodetected or set by user for input; + * always set by user for output. + * @li an @ref AVFormatContext.streams "array" of AVStreams, which describe all + * elementary streams stored in the file. AVStreams are typically referred to + * using their index in this array. + * @li an @ref AVFormatContext.pb "I/O context". It is either opened by lavf or + * set by user for input, always set by user for output (unless you are dealing + * with an AVFMT_NOFILE format). + * + * @section lavf_options Passing options to (de)muxers + * It is possible to configure lavf muxers and demuxers using the @ref avoptions + * mechanism. Generic (format-independent) libavformat options are provided by + * AVFormatContext, they can be examined from a user program by calling + * av_opt_next() / av_opt_find() on an allocated AVFormatContext (or its AVClass + * from avformat_get_class()). Private (format-specific) options are provided by + * AVFormatContext.priv_data if and only if AVInputFormat.priv_class / + * AVOutputFormat.priv_class of the corresponding format struct is non-NULL. + * Further options may be provided by the @ref AVFormatContext.pb "I/O context", + * if its AVClass is non-NULL, and the protocols layer. See the discussion on + * nesting in @ref avoptions documentation to learn how to access those. + * + * @section urls + * URL strings in libavformat are made of a scheme/protocol, a ':', and a + * scheme specific string. URLs without a scheme and ':' used for local files + * are supported but deprecated. "file:" should be used for local files. + * + * It is important that the scheme string is not taken from untrusted + * sources without checks. 
+ * + * Note that some schemes/protocols are quite powerful, allowing access to + * both local and remote files, parts of them, concatenations of them, local + * audio and video devices and so on. + * + * @defgroup lavf_decoding Demuxing + * @{ + * Demuxers read a media file and split it into chunks of data (@em packets). A + * @ref AVPacket "packet" contains one or more encoded frames which belongs to a + * single elementary stream. In the lavf API this process is represented by the + * avformat_open_input() function for opening a file, av_read_frame() for + * reading a single packet and finally avformat_close_input(), which does the + * cleanup. + * + * @section lavf_decoding_open Opening a media file + * The minimum information required to open a file is its URL, which + * is passed to avformat_open_input(), as in the following code: + * @code + * const char *url = "file:in.mp3"; + * AVFormatContext *s = NULL; + * int ret = avformat_open_input(&s, url, NULL, NULL); + * if (ret < 0) + * abort(); + * @endcode + * The above code attempts to allocate an AVFormatContext, open the + * specified file (autodetecting the format) and read the header, exporting the + * information stored there into s. Some formats do not have a header or do not + * store enough information there, so it is recommended that you call the + * avformat_find_stream_info() function which tries to read and decode a few + * frames to find missing information. + * + * In some cases you might want to preallocate an AVFormatContext yourself with + * avformat_alloc_context() and do some tweaking on it before passing it to + * avformat_open_input(). One such case is when you want to use custom functions + * for reading input data instead of lavf internal I/O layer. + * To do that, create your own AVIOContext with avio_alloc_context(), passing + * your reading callbacks to it. Then set the @em pb field of your + * AVFormatContext to newly created AVIOContext. 
+ * + * Since the format of the opened file is in general not known until after + * avformat_open_input() has returned, it is not possible to set demuxer private + * options on a preallocated context. Instead, the options should be passed to + * avformat_open_input() wrapped in an AVDictionary: + * @code + * AVDictionary *options = NULL; + * av_dict_set(&options, "video_size", "640x480", 0); + * av_dict_set(&options, "pixel_format", "rgb24", 0); + * + * if (avformat_open_input(&s, url, NULL, &options) < 0) + * abort(); + * av_dict_free(&options); + * @endcode + * This code passes the private options 'video_size' and 'pixel_format' to the + * demuxer. They would be necessary for e.g. the rawvideo demuxer, since it + * cannot know how to interpret raw video data otherwise. If the format turns + * out to be something different than raw video, those options will not be + * recognized by the demuxer and therefore will not be applied. Such unrecognized + * options are then returned in the options dictionary (recognized options are + * consumed). The calling program can handle such unrecognized options as it + * wishes, e.g. + * @code + * AVDictionaryEntry *e; + * if (e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX)) { + * fprintf(stderr, "Option %s not recognized by the demuxer.\n", e->key); + * abort(); + * } + * @endcode + * + * After you have finished reading the file, you must close it with + * avformat_close_input(). It will free everything associated with the file. + * + * @section lavf_decoding_read Reading from an opened file + * Reading data from an opened AVFormatContext is done by repeatedly calling + * av_read_frame() on it. Each call, if successful, will return an AVPacket + * containing encoded data for one AVStream, identified by + * AVPacket.stream_index. This packet may be passed straight into the libavcodec + * decoding functions avcodec_send_packet() or avcodec_decode_subtitle2() if the + * caller wishes to decode the data. 
+ * + * AVPacket.pts, AVPacket.dts and AVPacket.duration timing information will be + * set if known. They may also be unset (i.e. AV_NOPTS_VALUE for + * pts/dts, 0 for duration) if the stream does not provide them. The timing + * information will be in AVStream.time_base units, i.e. it has to be + * multiplied by the timebase to convert them to seconds. + * + * If AVPacket.buf is set on the returned packet, then the packet is + * allocated dynamically and the user may keep it indefinitely. + * Otherwise, if AVPacket.buf is NULL, the packet data is backed by a + * static storage somewhere inside the demuxer and the packet is only valid + * until the next av_read_frame() call or closing the file. If the caller + * requires a longer lifetime, av_dup_packet() will make an av_malloc()ed copy + * of it. + * In both cases, the packet must be freed with av_packet_unref() when it is no + * longer needed. + * + * @section lavf_decoding_seek Seeking + * @} + * + * @defgroup lavf_encoding Muxing + * @{ + * Muxers take encoded data in the form of @ref AVPacket "AVPackets" and write + * it into files or other output bytestreams in the specified container format. + * + * The main API functions for muxing are avformat_write_header() for writing the + * file header, av_write_frame() / av_interleaved_write_frame() for writing the + * packets and av_write_trailer() for finalizing the file. + * + * At the beginning of the muxing process, the caller must first call + * avformat_alloc_context() to create a muxing context. The caller then sets up + * the muxer by filling the various fields in this context: + * + * - The @ref AVFormatContext.oformat "oformat" field must be set to select the + * muxer that will be used. + * - Unless the format is of the AVFMT_NOFILE type, the @ref AVFormatContext.pb + * "pb" field must be set to an opened IO context, either returned from + * avio_open2() or a custom one. 
+ * - Unless the format is of the AVFMT_NOSTREAMS type, at least one stream must + * be created with the avformat_new_stream() function. The caller should fill + * the @ref AVStream.codecpar "stream codec parameters" information, such as the + * codec @ref AVCodecParameters.codec_type "type", @ref AVCodecParameters.codec_id + * "id" and other parameters (e.g. width / height, the pixel or sample format, + * etc.) as known. The @ref AVStream.time_base "stream timebase" should + * be set to the timebase that the caller desires to use for this stream (note + * that the timebase actually used by the muxer can be different, as will be + * described later). + * - It is advised to manually initialize only the relevant fields in + * AVCodecParameters, rather than using @ref avcodec_parameters_copy() during + * remuxing: there is no guarantee that the codec context values remain valid + * for both input and output format contexts. + * - The caller may fill in additional information, such as @ref + * AVFormatContext.metadata "global" or @ref AVStream.metadata "per-stream" + * metadata, @ref AVFormatContext.chapters "chapters", @ref + * AVFormatContext.programs "programs", etc. as described in the + * AVFormatContext documentation. Whether such information will actually be + * stored in the output depends on what the container format and the muxer + * support. + * + * When the muxing context is fully set up, the caller must call + * avformat_write_header() to initialize the muxer internals and write the file + * header. Whether anything actually is written to the IO context at this step + * depends on the muxer, but this function must always be called. Any muxer + * private options must be passed in the options parameter to this function. 
+ * + * The data is then sent to the muxer by repeatedly calling av_write_frame() or + * av_interleaved_write_frame() (consult those functions' documentation for + * discussion on the difference between them; only one of them may be used with + * a single muxing context, they should not be mixed). Do note that the timing + * information on the packets sent to the muxer must be in the corresponding + * AVStream's timebase. That timebase is set by the muxer (in the + * avformat_write_header() step) and may be different from the timebase + * requested by the caller. + * + * Once all the data has been written, the caller must call av_write_trailer() + * to flush any buffered packets and finalize the output file, then close the IO + * context (if any) and finally free the muxing context with + * avformat_free_context(). + * @} + * + * @defgroup lavf_io I/O Read/Write + * @{ + * @section lavf_io_dirlist Directory listing + * The directory listing API makes it possible to list files on remote servers. + * + * Some of possible use cases: + * - an "open file" dialog to choose files from a remote location, + * - a recursive media finder providing a player with an ability to play all + * files from a given directory. + * + * @subsection lavf_io_dirlist_open Opening a directory + * At first, a directory needs to be opened by calling avio_open_dir() + * supplied with a URL and, optionally, ::AVDictionary containing + * protocol-specific parameters. The function returns zero or positive + * integer and allocates AVIODirContext on success. + * + * @code + * AVIODirContext *ctx = NULL; + * if (avio_open_dir(&ctx, "smb://example.com/some_dir", NULL) < 0) { + * fprintf(stderr, "Cannot open directory.\n"); + * abort(); + * } + * @endcode + * + * This code tries to open a sample directory using smb protocol without + * any additional parameters. + * + * @subsection lavf_io_dirlist_read Reading entries + * Each directory's entry (i.e. 
file, another directory, anything else + * within ::AVIODirEntryType) is represented by AVIODirEntry. + * Reading consecutive entries from an opened AVIODirContext is done by + * repeatedly calling avio_read_dir() on it. Each call returns zero or + * positive integer if successful. Reading can be stopped right after the + * NULL entry has been read -- it means there are no entries left to be + * read. The following code reads all entries from a directory associated + * with ctx and prints their names to standard output. + * @code + * AVIODirEntry *entry = NULL; + * for (;;) { + * if (avio_read_dir(ctx, &entry) < 0) { + * fprintf(stderr, "Cannot list directory.\n"); + * abort(); + * } + * if (!entry) + * break; + * printf("%s\n", entry->name); + * avio_free_directory_entry(&entry); + * } + * @endcode + * @} + * + * @defgroup lavf_codec Demuxers + * @{ + * @defgroup lavf_codec_native Native Demuxers + * @{ + * @} + * @defgroup lavf_codec_wrappers External library wrappers + * @{ + * @} + * @} + * @defgroup lavf_protos I/O Protocols + * @{ + * @} + * @defgroup lavf_internal Internal + * @{ + * @} + * @} + */ + +#include +#include /* FILE */ +#include "../libavcodec/avcodec.h" +#include "../libavutil/dict.h" +#include "../libavutil/log.h" + +#include "avio.h" +#include "../libavformat/version.h" + +struct AVFormatContext; + +struct AVDeviceInfoList; +struct AVDeviceCapabilitiesQuery; + +/** + * @defgroup metadata_api Public Metadata API + * @{ + * @ingroup libavf + * The metadata API allows libavformat to export metadata tags to a client + * application when demuxing. Conversely it allows a client application to + * set metadata when muxing. + * + * Metadata is exported or set as pairs of key/value strings in the 'metadata' + * fields of the AVFormatContext, AVStream, AVChapter and AVProgram structs + * using the @ref lavu_dict "AVDictionary" API. Like all strings in FFmpeg, + * metadata is assumed to be UTF-8 encoded Unicode. 
Note that metadata + * exported by demuxers isn't checked to be valid UTF-8 in most cases. + * + * Important concepts to keep in mind: + * - Keys are unique; there can never be 2 tags with the same key. This is + * also meant semantically, i.e., a demuxer should not knowingly produce + * several keys that are literally different but semantically identical. + * E.g., key=Author5, key=Author6. In this example, all authors must be + * placed in the same tag. + * - Metadata is flat, not hierarchical; there are no subtags. If you + * want to store, e.g., the email address of the child of producer Alice + * and actor Bob, that could have key=alice_and_bobs_childs_email_address. + * - Several modifiers can be applied to the tag name. This is done by + * appending a dash character ('-') and the modifier name in the order + * they appear in the list below -- e.g. foo-eng-sort, not foo-sort-eng. + * - language -- a tag whose value is localized for a particular language + * is appended with the ISO 639-2/B 3-letter language code. + * For example: Author-ger=Michael, Author-eng=Mike + * The original/default language is in the unqualified "Author" tag. + * A demuxer should set a default if it sets any translated tag. + * - sorting -- a modified version of a tag that should be used for + * sorting will have '-sort' appended. E.g. artist="The Beatles", + * artist-sort="Beatles, The". + * - Some protocols and demuxers support metadata updates. After a successful + * call to av_read_packet(), AVFormatContext.event_flags or AVStream.event_flags + * will be updated to indicate if metadata changed. In order to detect metadata + * changes on a stream, you need to loop through all streams in the AVFormatContext + * and check their individual event_flags. + * + * - Demuxers attempt to export metadata in a generic format, however tags + * with no generic equivalents are left as they are stored in the container. 
+ * Follows a list of generic tag names: + * + @verbatim + album -- name of the set this work belongs to + album_artist -- main creator of the set/album, if different from artist. + e.g. "Various Artists" for compilation albums. + artist -- main creator of the work + comment -- any additional description of the file. + composer -- who composed the work, if different from artist. + copyright -- name of copyright holder. + creation_time-- date when the file was created, preferably in ISO 8601. + date -- date when the work was created, preferably in ISO 8601. + disc -- number of a subset, e.g. disc in a multi-disc collection. + encoder -- name/settings of the software/hardware that produced the file. + encoded_by -- person/group who created the file. + filename -- original name of the file. + genre -- . + language -- main language in which the work is performed, preferably + in ISO 639-2 format. Multiple languages can be specified by + separating them with commas. + performer -- artist who performed the work, if different from artist. + E.g for "Also sprach Zarathustra", artist would be "Richard + Strauss" and performer "London Philharmonic Orchestra". + publisher -- name of the label/publisher. + service_name -- name of the service in broadcasting (channel name). + service_provider -- name of the service provider in broadcasting. + title -- name of the work. + track -- number of this work in the set, can be in form current/total. + variant_bitrate -- the total bitrate of the bitrate variant that the current stream is part of + @endverbatim + * + * Look in the examples section for an application example how to use the Metadata API. + * + * @} + */ + +/* packet functions */ + + +/** + * Allocate and read the payload of a packet and initialize its + * fields with default values. 
+ * + * @param s associated IO context + * @param pkt packet + * @param size desired payload size + * @return >0 (read size) if OK, AVERROR_xxx otherwise + */ +int av_get_packet(AVIOContext *s, AVPacket *pkt, int size); + + +/** + * Read data and append it to the current content of the AVPacket. + * If pkt->size is 0 this is identical to av_get_packet. + * Note that this uses av_grow_packet and thus involves a realloc + * which is inefficient. Thus this function should only be used + * when there is no reasonable way to know (an upper bound of) + * the final size. + * + * @param s associated IO context + * @param pkt packet + * @param size amount of data to read + * @return >0 (read size) if OK, AVERROR_xxx otherwise, previous data + * will not be lost even if an error occurs. + */ +int av_append_packet(AVIOContext *s, AVPacket *pkt, int size); + +#if FF_API_LAVF_FRAC +/*************************************************/ +/* fractional numbers for exact pts handling */ + +/** + * The exact value of the fractional number is: 'val + num / den'. + * num is assumed to be 0 <= num < den. + */ +typedef struct AVFrac { + int64_t val, num, den; +} AVFrac; +#endif + +/*************************************************/ +/* input/output formats */ + +struct AVCodecTag; + +/** + * This structure contains the data a format has to probe a file. + */ +typedef struct AVProbeData { + const char *filename; + unsigned char *buf; /**< Buffer must have AVPROBE_PADDING_SIZE of extra allocated bytes filled with zero. */ + int buf_size; /**< Size of buf except extra allocated bytes */ + const char *mime_type; /**< mime_type, when known. 
*/ +} AVProbeData; + +#define AVPROBE_SCORE_RETRY (AVPROBE_SCORE_MAX/4) +#define AVPROBE_SCORE_STREAM_RETRY (AVPROBE_SCORE_MAX/4-1) + +#define AVPROBE_SCORE_EXTENSION 50 ///< score for file extension +#define AVPROBE_SCORE_MIME 75 ///< score for file mime type +#define AVPROBE_SCORE_MAX 100 ///< maximum score + +#define AVPROBE_PADDING_SIZE 32 ///< extra allocated bytes at the end of the probe buffer + +/// Demuxer will use avio_open, no opened file should be provided by the caller. +#define AVFMT_NOFILE 0x0001 +#define AVFMT_NEEDNUMBER 0x0002 /**< Needs '%d' in filename. */ +#define AVFMT_SHOW_IDS 0x0008 /**< Show format stream IDs numbers. */ +#if FF_API_LAVF_FMT_RAWPICTURE +#define AVFMT_RAWPICTURE 0x0020 /**< Format wants AVPicture structure for + raw picture data. @deprecated Not used anymore */ +#endif +#define AVFMT_GLOBALHEADER 0x0040 /**< Format wants global header. */ +#define AVFMT_NOTIMESTAMPS 0x0080 /**< Format does not need / have any timestamps. */ +#define AVFMT_GENERIC_INDEX 0x0100 /**< Use generic index building code. */ +#define AVFMT_TS_DISCONT 0x0200 /**< Format allows timestamp discontinuities. Note, muxers always require valid (monotone) timestamps */ +#define AVFMT_VARIABLE_FPS 0x0400 /**< Format allows variable fps. */ +#define AVFMT_NODIMENSIONS 0x0800 /**< Format does not need width/height */ +#define AVFMT_NOSTREAMS 0x1000 /**< Format does not require any streams */ +#define AVFMT_NOBINSEARCH 0x2000 /**< Format does not allow to fall back on binary search via read_timestamp */ +#define AVFMT_NOGENSEARCH 0x4000 /**< Format does not allow to fall back on generic search */ +#define AVFMT_NO_BYTE_SEEK 0x8000 /**< Format does not allow seeking by bytes */ +#define AVFMT_ALLOW_FLUSH 0x10000 /**< Format allows flushing. If not set, the muxer will not receive a NULL packet in the write_packet function. 
*/ +#define AVFMT_TS_NONSTRICT 0x20000 /**< Format does not require strictly + increasing timestamps, but they must + still be monotonic */ +#define AVFMT_TS_NEGATIVE 0x40000 /**< Format allows muxing negative + timestamps. If not set the timestamp + will be shifted in av_write_frame and + av_interleaved_write_frame so they + start from 0. + The user or muxer can override this through + AVFormatContext.avoid_negative_ts + */ + +#define AVFMT_SEEK_TO_PTS 0x4000000 /**< Seeking is based on PTS */ + +/** + * @addtogroup lavf_encoding + * @{ + */ +typedef struct AVOutputFormat { + const char *name; + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + const char *mime_type; + const char *extensions; /**< comma-separated filename extensions */ + /* output support */ + enum AVCodecID audio_codec; /**< default audio codec */ + enum AVCodecID video_codec; /**< default video codec */ + enum AVCodecID subtitle_codec; /**< default subtitle codec */ + /** + * can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, + * AVFMT_GLOBALHEADER, AVFMT_NOTIMESTAMPS, AVFMT_VARIABLE_FPS, + * AVFMT_NODIMENSIONS, AVFMT_NOSTREAMS, AVFMT_ALLOW_FLUSH, + * AVFMT_TS_NONSTRICT, AVFMT_TS_NEGATIVE + */ + int flags; + + /** + * List of supported codec_id-codec_tag pairs, ordered by "better + * choice first". The arrays are all terminated by AV_CODEC_ID_NONE. + */ + const struct AVCodecTag * const *codec_tag; + + + const AVClass *priv_class; ///< AVClass for the private context + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. 
+ ***************************************************************** + */ + struct AVOutputFormat *next; + /** + * size of private data so that it can be allocated in the wrapper + */ + int priv_data_size; + + int (*write_header)(struct AVFormatContext *); + /** + * Write a packet. If AVFMT_ALLOW_FLUSH is set in flags, + * pkt can be NULL in order to flush data buffered in the muxer. + * When flushing, return 0 if there still is more data to flush, + * or 1 if everything was flushed and there is no more buffered + * data. + */ + int (*write_packet)(struct AVFormatContext *, AVPacket *pkt); + int (*write_trailer)(struct AVFormatContext *); + /** + * Currently only used to set pixel format if not YUV420P. + */ + int (*interleave_packet)(struct AVFormatContext *, AVPacket *out, + AVPacket *in, int flush); + /** + * Test if the given codec can be stored in this container. + * + * @return 1 if the codec is supported, 0 if it is not. + * A negative number if unknown. + * MKTAG('A', 'P', 'I', 'C') if the codec is only supported as AV_DISPOSITION_ATTACHED_PIC + */ + int (*query_codec)(enum AVCodecID id, int std_compliance); + + void (*get_output_timestamp)(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + /** + * Allows sending messages from application to device. + */ + int (*control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + + /** + * Write an uncoded AVFrame. + * + * See av_write_uncoded_frame() for details. + * + * The library will free *frame afterwards, but the muxer can prevent it + * by setting the pointer to NULL. + */ + int (*write_uncoded_frame)(struct AVFormatContext *, int stream_index, + AVFrame **frame, unsigned flags); + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + /** + * Initialize device capabilities submodule. 
+ * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. + */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + enum AVCodecID data_codec; /**< default data codec */ + /** + * Initialize format. May allocate data here, and set any AVFormatContext or + * AVStream parameters that need to be set before packets are sent. + * This method must not write output. + * + * Any allocations made here must be freed in deinit(). + */ + int (*init)(struct AVFormatContext *); + /** + * Deinitialize format. If present, this is called whenever the muxer is being + * destroyed, regardless of whether or not the header has been written. + * + * If a trailer is being written, this is called after write_trailer(). + * + * This is called if init() fails as well. + */ + void (*deinit)(struct AVFormatContext *); + /** + * Set up any necessary bitstream filtering and extract any extra data needed + * for the global header. + * Return 0 if more packets from this stream must be checked; 1 if not. + */ + int (*check_bitstream)(struct AVFormatContext *, const AVPacket *pkt); +} AVOutputFormat; +/** + * @} + */ + +/** + * @addtogroup lavf_decoding + * @{ + */ +typedef struct AVInputFormat { + /** + * A comma separated list of short names for the format. New names + * may be appended with a minor bump. + */ + const char *name; + + /** + * Descriptive name for the format, meant to be more human-readable + * than name. You should use the NULL_IF_CONFIG_SMALL() macro + * to define it. + */ + const char *long_name; + + /** + * Can use flags: AVFMT_NOFILE, AVFMT_NEEDNUMBER, AVFMT_SHOW_IDS, + * AVFMT_GENERIC_INDEX, AVFMT_TS_DISCONT, AVFMT_NOBINSEARCH, + * AVFMT_NOGENSEARCH, AVFMT_NO_BYTE_SEEK, AVFMT_SEEK_TO_PTS. 
+ */ + int flags; + + /** + * If extensions are defined, then no probe is done. You should + * usually not use extension format guessing because it is not + * reliable enough + */ + const char *extensions; + + const struct AVCodecTag * const *codec_tag; + + const AVClass *priv_class; ///< AVClass for the private context + + /** + * Comma-separated list of mime types. + * It is used check for matching mime types while probing. + * @see av_probe_input_format2 + */ + const char *mime_type; + + /***************************************************************** + * No fields below this line are part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + struct AVInputFormat *next; + + /** + * Raw demuxers store their codec ID here. + */ + int raw_codec_id; + + /** + * Size of private data so that it can be allocated in the wrapper. + */ + int priv_data_size; + + /** + * Tell if a given file has a chance of being parsed as this format. + * The buffer provided is guaranteed to be AVPROBE_PADDING_SIZE bytes + * big so you do not have to check for that unless you need more. + */ + int (*read_probe)(AVProbeData *); + + /** + * Read the format header and initialize the AVFormatContext + * structure. Return 0 if OK. 'avformat_new_stream' should be + * called to create new streams. + */ + int (*read_header)(struct AVFormatContext *); + + /** + * Read one packet and put it in 'pkt'. pts and flags are also + * set. 'avformat_new_stream' can be called only if the flag + * AVFMTCTX_NOHEADER is used and only in the calling thread (not in a + * background thread). + * @return 0 on success, < 0 on error. + * When returning an error, pkt must not have been allocated + * or must be freed before returning + */ + int (*read_packet)(struct AVFormatContext *, AVPacket *pkt); + + /** + * Close the stream. 
The AVFormatContext and AVStreams are not + * freed by this function + */ + int (*read_close)(struct AVFormatContext *); + + /** + * Seek to a given timestamp relative to the frames in + * stream component stream_index. + * @param stream_index Must not be -1. + * @param flags Selects which direction should be preferred if no exact + * match is available. + * @return >= 0 on success (but not necessarily the new offset) + */ + int (*read_seek)(struct AVFormatContext *, + int stream_index, int64_t timestamp, int flags); + + /** + * Get the next timestamp in stream[stream_index].time_base units. + * @return the timestamp or AV_NOPTS_VALUE if an error occurred + */ + int64_t (*read_timestamp)(struct AVFormatContext *s, int stream_index, + int64_t *pos, int64_t pos_limit); + + /** + * Start/resume playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_play)(struct AVFormatContext *); + + /** + * Pause playing - only meaningful if using a network-based format + * (RTSP). + */ + int (*read_pause)(struct AVFormatContext *); + + /** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + */ + int (*read_seek2)(struct AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + + /** + * Returns device list with it properties. + * @see avdevice_list_devices() for more details. + */ + int (*get_device_list)(struct AVFormatContext *s, struct AVDeviceInfoList *device_list); + + /** + * Initialize device capabilities submodule. + * @see avdevice_capabilities_create() for more details. + */ + int (*create_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); + + /** + * Free device capabilities submodule. + * @see avdevice_capabilities_free() for more details. 
+ */ + int (*free_device_capabilities)(struct AVFormatContext *s, struct AVDeviceCapabilitiesQuery *caps); +} AVInputFormat; +/** + * @} + */ + +enum AVStreamParseType { + AVSTREAM_PARSE_NONE, + AVSTREAM_PARSE_FULL, /**< full parsing and repack */ + AVSTREAM_PARSE_HEADERS, /**< Only parse headers, do not repack. */ + AVSTREAM_PARSE_TIMESTAMPS, /**< full parsing and interpolation of timestamps for frames not starting on a packet boundary */ + AVSTREAM_PARSE_FULL_ONCE, /**< full parsing and repack of the first frame only, only implemented for H.264 currently */ + AVSTREAM_PARSE_FULL_RAW=MKTAG(0,'R','A','W'), /**< full parsing and repack with timestamp and position generation by parser for raw + this assumes that each packet in the file contains no demuxer level headers and + just codec level data, otherwise position generation would fail */ +}; + +typedef struct AVIndexEntry { + int64_t pos; + int64_t timestamp; /**< + * Timestamp in AVStream.time_base units, preferably the time from which on correctly decoded frames are available + * when seeking to this entry. That means preferable PTS on keyframe based formats. + * But demuxers can choose to store a different timestamp, if it is more convenient for the implementation or nothing better + * is known + */ +#define AVINDEX_KEYFRAME 0x0001 + int flags:2; + int size:30; //Yeah, trying to keep the size of this small to reduce memory requirements (it is 24 vs. 32 bytes due to possible 8-byte alignment). + int min_distance; /**< Minimum distance between this and the previous keyframe, used to avoid unneeded searching. */ +} AVIndexEntry; + +#define AV_DISPOSITION_DEFAULT 0x0001 +#define AV_DISPOSITION_DUB 0x0002 +#define AV_DISPOSITION_ORIGINAL 0x0004 +#define AV_DISPOSITION_COMMENT 0x0008 +#define AV_DISPOSITION_LYRICS 0x0010 +#define AV_DISPOSITION_KARAOKE 0x0020 + +/** + * Track should be used during playback by default. 
+ * Useful for subtitle track that should be displayed + * even when user did not explicitly ask for subtitles. + */ +#define AV_DISPOSITION_FORCED 0x0040 +#define AV_DISPOSITION_HEARING_IMPAIRED 0x0080 /**< stream for hearing impaired audiences */ +#define AV_DISPOSITION_VISUAL_IMPAIRED 0x0100 /**< stream for visual impaired audiences */ +#define AV_DISPOSITION_CLEAN_EFFECTS 0x0200 /**< stream without voice */ +/** + * The stream is stored in the file as an attached picture/"cover art" (e.g. + * APIC frame in ID3v2). The single packet associated with it will be returned + * among the first few packets read from the file unless seeking takes place. + * It can also be accessed at any time in AVStream.attached_pic. + */ +#define AV_DISPOSITION_ATTACHED_PIC 0x0400 + +typedef struct AVStreamInternal AVStreamInternal; + +/** + * To specify text track kind (different from subtitles default). + */ +#define AV_DISPOSITION_CAPTIONS 0x10000 +#define AV_DISPOSITION_DESCRIPTIONS 0x20000 +#define AV_DISPOSITION_METADATA 0x40000 + +/** + * Options for behavior on timestamp wrap detection. + */ +#define AV_PTS_WRAP_IGNORE 0 ///< ignore the wrap +#define AV_PTS_WRAP_ADD_OFFSET 1 ///< add the format specific offset on wrap detection +#define AV_PTS_WRAP_SUB_OFFSET -1 ///< subtract the format specific offset on wrap detection + +/** + * Stream structure. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVStream) must not be used outside libav*. + */ +typedef struct AVStream { + int index; /**< stream index in AVFormatContext */ + /** + * Format-specific stream ID. 
+ * decoding: set by libavformat + * encoding: set by the user, replaced by libavformat if left unset + */ + int id; +#if FF_API_LAVF_AVCTX + /** + * @deprecated use the codecpar struct instead + */ + attribute_deprecated + AVCodecContext *codec; +#endif + void *priv_data; + +#if FF_API_LAVF_FRAC + /** + * @deprecated this field is unused + */ + attribute_deprecated + struct AVFrac pts; +#endif + + /** + * This is the fundamental unit of time (in seconds) in terms + * of which frame timestamps are represented. + * + * decoding: set by libavformat + * encoding: May be set by the caller before avformat_write_header() to + * provide a hint to the muxer about the desired timebase. In + * avformat_write_header(), the muxer will overwrite this field + * with the timebase that will actually be used for the timestamps + * written into the file (which may or may not be related to the + * user-provided one, depending on the format). + */ + AVRational time_base; + + /** + * Decoding: pts of the first frame of the stream in presentation order, in stream time base. + * Only set this if you are absolutely 100% sure that the value you set + * it to really is the pts of the first frame. + * This may be undefined (AV_NOPTS_VALUE). + * @note The ASF header does NOT contain a correct start_time the ASF + * demuxer must NOT set this. + */ + int64_t start_time; + + /** + * Decoding: duration of the stream, in stream time base. + * If a source file does not specify a duration, but does specify + * a bitrate, this value will be estimated from bitrate and file size. + */ + int64_t duration; + + int64_t nb_frames; ///< number of frames in this stream if known or 0 + + int disposition; /**< AV_DISPOSITION_* bit field */ + + enum AVDiscard discard; ///< Selects which packets can be discarded at will and do not need to be demuxed. + + /** + * sample aspect ratio (0 if unknown) + * - encoding: Set by user. + * - decoding: Set by libavformat. 
+ */ + AVRational sample_aspect_ratio; + + AVDictionary *metadata; + + /** + * Average framerate + * + * - demuxing: May be set by libavformat when creating the stream or in + * avformat_find_stream_info(). + * - muxing: May be set by the caller before avformat_write_header(). + */ + AVRational avg_frame_rate; + + /** + * For streams with AV_DISPOSITION_ATTACHED_PIC disposition, this packet + * will contain the attached picture. + * + * decoding: set by libavformat, must not be modified by the caller. + * encoding: unused + */ + AVPacket attached_pic; + + /** + * An array of side data that applies to the whole stream (i.e. the + * container does not allow it to change between packets). + * + * There may be no overlap between the side data in this array and side data + * in the packets. I.e. a given side data is either exported by the muxer + * (demuxing) / set by the caller (muxing) in this array, then it never + * appears in the packets, or the side data is exported / sent through + * the packets (always in the first packet where the value becomes known or + * changes), then it does not appear in this array. + * + * - demuxing: Set by libavformat when the stream is created. + * - muxing: May be set by the caller before avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + * + * @see av_format_inject_global_side_data() + */ + AVPacketSideData *side_data; + /** + * The number of elements in the AVStream.side_data array. + */ + int nb_side_data; + + /** + * Flags for the user to detect events happening on the stream. Flags must + * be cleared by the user once the event has been handled. + * A combination of AVSTREAM_EVENT_FLAG_*. + */ + int event_flags; +#define AVSTREAM_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. + + /***************************************************************** + * All fields below this line are not part of the public API. 
They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + + /** + * Stream information used internally by av_find_stream_info() + */ +#define MAX_STD_TIMEBASES (30*12+30+3+6) + struct { + int64_t last_dts; + int64_t duration_gcd; + int duration_count; + int64_t rfps_duration_sum; + double (*duration_error)[2][MAX_STD_TIMEBASES]; + int64_t codec_info_duration; + int64_t codec_info_duration_fields; + + /** + * 0 -> decoder has not been searched for yet. + * >0 -> decoder found + * <0 -> decoder with codec_id == -found_decoder has not been found + */ + int found_decoder; + + int64_t last_duration; + + /** + * Those are used for average framerate estimation. + */ + int64_t fps_first_dts; + int fps_first_dts_idx; + int64_t fps_last_dts; + int fps_last_dts_idx; + + } *info; + + int pts_wrap_bits; /**< number of bits in pts (used for wrapping control) */ + + // Timestamp generation support: + /** + * Timestamp corresponding to the last dts sync point. + * + * Initialized when AVCodecParserContext.dts_sync_point >= 0 and + * a DTS is received from the underlying container. Otherwise set to + * AV_NOPTS_VALUE by default. + */ + int64_t first_dts; + int64_t cur_dts; + int64_t last_IP_pts; + int last_IP_duration; + + /** + * Number of packets to buffer for codec probing + */ + int probe_packets; + + /** + * Number of frames that have been demuxed during av_find_stream_info() + */ + int codec_info_nb_frames; + + /* av_read_frame() support */ + enum AVStreamParseType need_parsing; + struct AVCodecParserContext *parser; + + /** + * last packet in packet_buffer for this stream when muxing. 
+ */ + struct AVPacketList *last_in_packet_buffer; + AVProbeData probe_data; +#define MAX_REORDER_DELAY 16 + int64_t pts_buffer[MAX_REORDER_DELAY+1]; + + AVIndexEntry *index_entries; /**< Only used if the format does not + support seeking natively. */ + int nb_index_entries; + unsigned int index_entries_allocated_size; + + /** + * Real base framerate of the stream. + * This is the lowest framerate with which all timestamps can be + * represented accurately (it is the least common multiple of all + * framerates in the stream). Note, this value is just a guess! + * For example, if the time base is 1/90000 and all frames have either + * approximately 3600 or 1800 timer ticks, then r_frame_rate will be 50/1. + * + * Code outside avformat should access this field using: + * av_stream_get/set_r_frame_rate(stream) + */ + AVRational r_frame_rate; + + /** + * Stream Identifier + * This is the MPEG-TS stream identifier +1 + * 0 means unknown + */ + int stream_identifier; + + int64_t interleaver_chunk_size; + int64_t interleaver_chunk_duration; + + /** + * stream probing state + * -1 -> probing finished + * 0 -> no probing requested + * rest -> perform probing with request_probe being the minimum score to accept. + * NOT PART OF PUBLIC API + */ + int request_probe; + /** + * Indicates that everything up to the next keyframe + * should be discarded. + */ + int skip_to_keyframe; + + /** + * Number of samples to skip at the start of the frame decoded from the next packet. + */ + int skip_samples; + + /** + * If not 0, the number of samples that should be skipped from the start of + * the stream (the samples are removed from packets with pts==0, which also + * assumes negative timestamps do not happen). + * Intended for use with formats such as mp3 with ad-hoc gapless audio + * support. + */ + int64_t start_skip_samples; + + /** + * If not 0, the first audio sample that should be discarded from the stream. 
+ * This is broken by design (needs global sample count), but can't be + * avoided for broken by design formats such as mp3 with ad-hoc gapless + * audio support. + */ + int64_t first_discard_sample; + + /** + * The sample after last sample that is intended to be discarded after + * first_discard_sample. Works on frame boundaries only. Used to prevent + * early EOF if the gapless info is broken (considered concatenated mp3s). + */ + int64_t last_discard_sample; + + /** + * Number of internally decoded frames, used internally in libavformat, do not access + * its lifetime differs from info which is why it is not in that structure. + */ + int nb_decoded_frames; + + /** + * Timestamp offset added to timestamps before muxing + * NOT PART OF PUBLIC API + */ + int64_t mux_ts_offset; + + /** + * Internal data to check for wrapping of the time stamp + */ + int64_t pts_wrap_reference; + + /** + * Options for behavior, when a wrap is detected. + * + * Defined by AV_PTS_WRAP_ values. + * + * If correction is enabled, there are two possibilities: + * If the first time stamp is near the wrap point, the wrap offset + * will be subtracted, which will create negative time stamps. + * Otherwise the offset will be added. + */ + int pts_wrap_behavior; + + /** + * Internal data to prevent doing update_initial_durations() twice + */ + int update_initial_durations_done; + + /** + * Internal data to generate dts from pts + */ + int64_t pts_reorder_error[MAX_REORDER_DELAY+1]; + uint8_t pts_reorder_error_count[MAX_REORDER_DELAY+1]; + + /** + * Internal data to analyze DTS and detect faulty mpeg streams + */ + int64_t last_dts_for_order_check; + uint8_t dts_ordered; + uint8_t dts_misordered; + + /** + * Internal data to inject global side data + */ + int inject_global_side_data; + + /** + * String containing paris of key and values describing recommended encoder configuration. + * Paris are separated by ','. + * Keys are separated from values by '='. 
+ */ + char *recommended_encoder_configuration; + + /** + * display aspect ratio (0 if unknown) + * - encoding: unused + * - decoding: Set by libavformat to calculate sample_aspect_ratio internally + */ + AVRational display_aspect_ratio; + + struct FFFrac *priv_pts; + + /** + * An opaque field for libavformat internal usage. + * Must not be accessed in any way by callers. + */ + AVStreamInternal *internal; + + /* + * Codec parameters associated with this stream. Allocated and freed by + * libavformat in avformat_new_stream() and avformat_free_context() + * respectively. + * + * - demuxing: filled by libavformat on stream creation or in + * avformat_find_stream_info() + * - muxing: filled by the caller before avformat_write_header() + */ + AVCodecParameters *codecpar; +} AVStream; + +AVRational av_stream_get_r_frame_rate(const AVStream *s); +void av_stream_set_r_frame_rate(AVStream *s, AVRational r); +struct AVCodecParserContext *av_stream_get_parser(const AVStream *s); +char* av_stream_get_recommended_encoder_configuration(const AVStream *s); +void av_stream_set_recommended_encoder_configuration(AVStream *s, char *configuration); + +/** + * Returns the pts of the last muxed packet + its duration + * + * the retuned value is undefined when used with a demuxer. + */ +int64_t av_stream_get_end_pts(const AVStream *st); + +#define AV_PROGRAM_RUNNING 1 + +/** + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVProgram) must not be used outside libav*. 
+ */ +typedef struct AVProgram { + int id; + int flags; + enum AVDiscard discard; ///< selects which program to discard and which to feed to the caller + unsigned int *stream_index; + unsigned int nb_stream_indexes; + AVDictionary *metadata; + + int program_num; + int pmt_pid; + int pcr_pid; + + /***************************************************************** + * All fields below this line are not part of the public API. They + * may not be used outside of libavformat and can be changed and + * removed at will. + * New public fields should be added right above. + ***************************************************************** + */ + int64_t start_time; + int64_t end_time; + + int64_t pts_wrap_reference; ///< reference dts for wrap detection + int pts_wrap_behavior; ///< behavior on wrap detection +} AVProgram; + +#define AVFMTCTX_NOHEADER 0x0001 /**< signal that no header is present + (streams are added dynamically) */ + +typedef struct AVChapter { + int id; ///< unique ID to identify the chapter + AVRational time_base; ///< time base in which the start/end timestamps are specified + int64_t start, end; ///< chapter start/end time in time_base units + AVDictionary *metadata; +} AVChapter; + + +/** + * Callback used by devices to communicate with application. + */ +typedef int (*av_format_control_message)(struct AVFormatContext *s, int type, + void *data, size_t data_size); + +typedef int (*AVOpenCallback)(struct AVFormatContext *s, AVIOContext **pb, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * The duration of a video can be estimated through various ways, and this enum can be used + * to know how the duration was estimated. 
+ */ +enum AVDurationEstimationMethod { + AVFMT_DURATION_FROM_PTS, ///< Duration accurately estimated from PTSes + AVFMT_DURATION_FROM_STREAM, ///< Duration estimated from a stream with a known duration + AVFMT_DURATION_FROM_BITRATE ///< Duration estimated from bitrate (less accurate) +}; + +typedef struct AVFormatInternal AVFormatInternal; + +/** + * Format I/O context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVFormatContext) must not be used outside libav*, use + * avformat_alloc_context() to create an AVFormatContext. + * + * Fields can be accessed through AVOptions (av_opt*), + * the name string used matches the associated command line parameter name and + * can be found in libavformat/options_table.h. + * The AVOption/command line parameter names differ in some cases from the C + * structure field names for historic reasons or brevity. + */ +typedef struct AVFormatContext { + /** + * A class for logging and @ref avoptions. Set by avformat_alloc_context(). + * Exports (de)muxer private options if they exist. + */ + const AVClass *av_class; + + /** + * The input container format. + * + * Demuxing only, set by avformat_open_input(). + */ + struct AVInputFormat *iformat; + + /** + * The output container format. + * + * Muxing only, must be set by the caller before avformat_write_header(). + */ + struct AVOutputFormat *oformat; + + /** + * Format private data. This is an AVOptions-enabled struct + * if and only if iformat/oformat.priv_class is not NULL. + * + * - muxing: set by avformat_write_header() + * - demuxing: set by avformat_open_input() + */ + void *priv_data; + + /** + * I/O context. + * + * - demuxing: either set by the user before avformat_open_input() (then + * the user must close it manually) or set by avformat_open_input(). + * - muxing: set by the user before avformat_write_header(). 
The caller must + * take care of closing / freeing the IO context. + * + * Do NOT set this field if AVFMT_NOFILE flag is set in + * iformat/oformat.flags. In such a case, the (de)muxer will handle + * I/O in some other way and this field will be NULL. + */ + AVIOContext *pb; + + /* stream info */ + /** + * Flags signalling stream properties. A combination of AVFMTCTX_*. + * Set by libavformat. + */ + int ctx_flags; + + /** + * Number of elements in AVFormatContext.streams. + * + * Set by avformat_new_stream(), must not be modified by any other code. + */ + unsigned int nb_streams; + /** + * A list of all streams in the file. New streams are created with + * avformat_new_stream(). + * + * - demuxing: streams are created by libavformat in avformat_open_input(). + * If AVFMTCTX_NOHEADER is set in ctx_flags, then new streams may also + * appear in av_read_frame(). + * - muxing: streams are created by the user before avformat_write_header(). + * + * Freed by libavformat in avformat_free_context(). + */ + AVStream **streams; + + /** + * input or output filename + * + * - demuxing: set by avformat_open_input() + * - muxing: may be set by the caller before avformat_write_header() + */ + char filename[1024]; + + /** + * Position of the first frame of the component, in + * AV_TIME_BASE fractional seconds. NEVER set this value directly: + * It is deduced from the AVStream values. + * + * Demuxing only, set by libavformat. + */ + int64_t start_time; + + /** + * Duration of the stream, in AV_TIME_BASE fractional + * seconds. Only set this value if you know none of the individual stream + * durations and also do not set any of them. This is deduced from the + * AVStream values if not set. + * + * Demuxing only, set by libavformat. + */ + int64_t duration; + + /** + * Total stream bitrate in bit/s, 0 if not + * available. Never set it directly if the file_size and the + * duration are known as FFmpeg can compute it automatically. 
+ */ + int64_t bit_rate; + + unsigned int packet_size; + int max_delay; + + /** + * Flags modifying the (de)muxer behaviour. A combination of AVFMT_FLAG_*. + * Set by the user before avformat_open_input() / avformat_write_header(). + */ + int flags; +#define AVFMT_FLAG_GENPTS 0x0001 ///< Generate missing pts even if it requires parsing future frames. +#define AVFMT_FLAG_IGNIDX 0x0002 ///< Ignore index. +#define AVFMT_FLAG_NONBLOCK 0x0004 ///< Do not block when reading packets from input. +#define AVFMT_FLAG_IGNDTS 0x0008 ///< Ignore DTS on frames that contain both DTS & PTS +#define AVFMT_FLAG_NOFILLIN 0x0010 ///< Do not infer any values from other values, just return what is stored in the container +#define AVFMT_FLAG_NOPARSE 0x0020 ///< Do not use AVParsers, you also must set AVFMT_FLAG_NOFILLIN as the fillin code works on frames and no parsing -> no frames. Also seeking to frames can not work if parsing to find frame boundaries has been disabled +#define AVFMT_FLAG_NOBUFFER 0x0040 ///< Do not buffer frames when possible +#define AVFMT_FLAG_CUSTOM_IO 0x0080 ///< The caller has supplied a custom AVIOContext, don't avio_close() it. +#define AVFMT_FLAG_DISCARD_CORRUPT 0x0100 ///< Discard frames marked corrupted +#define AVFMT_FLAG_FLUSH_PACKETS 0x0200 ///< Flush the AVIOContext every packet. +/** + * When muxing, try to avoid writing any random/volatile data to the output. + * This includes any random IDs, real-time timestamps/dates, muxer version, etc. + * + * This flag is mainly intended for testing. 
+ */ +#define AVFMT_FLAG_BITEXACT 0x0400 +#define AVFMT_FLAG_MP4A_LATM 0x8000 ///< Enable RTP MP4A-LATM payload +#define AVFMT_FLAG_SORT_DTS 0x10000 ///< try to interleave outputted packets by dts (using this flag can slow demuxing down) +#define AVFMT_FLAG_PRIV_OPT 0x20000 ///< Enable use of private options by delaying codec open (this could be made default once all code is converted) +#define AVFMT_FLAG_KEEP_SIDE_DATA 0x40000 ///< Don't merge side data but keep it separate. +#define AVFMT_FLAG_FAST_SEEK 0x80000 ///< Enable fast, but inaccurate seeks for some formats + + /** + * Maximum size of the data read from input for determining + * the input container format. + * Demuxing only, set by the caller before avformat_open_input(). + */ + int64_t probesize; + + /** + * Maximum duration (in AV_TIME_BASE units) of the data read + * from input in avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + * Can be set to 0 to let avformat choose using a heuristic. + */ + int64_t max_analyze_duration; + + const uint8_t *key; + int keylen; + + unsigned int nb_programs; + AVProgram **programs; + + /** + * Forced video codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID video_codec_id; + + /** + * Forced audio codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID audio_codec_id; + + /** + * Forced subtitle codec_id. + * Demuxing: Set by user. + */ + enum AVCodecID subtitle_codec_id; + + /** + * Maximum amount of memory in bytes to use for the index of each stream. + * If the index exceeds this size, entries will be discarded as + * needed to maintain a smaller size. This can lead to slower or less + * accurate seeking (depends on demuxer). + * Demuxers for which a full in-memory index is mandatory will ignore + * this. 
+ * - muxing: unused + * - demuxing: set by user + */ + unsigned int max_index_size; + + /** + * Maximum amount of memory in bytes to use for buffering frames + * obtained from realtime capture devices. + */ + unsigned int max_picture_buffer; + + /** + * Number of chapters in AVChapter array. + * When muxing, chapters are normally written in the file header, + * so nb_chapters should normally be initialized before write_header + * is called. Some muxers (e.g. mov and mkv) can also write chapters + * in the trailer. To write chapters in the trailer, nb_chapters + * must be zero when write_header is called and non-zero when + * write_trailer is called. + * - muxing: set by user + * - demuxing: set by libavformat + */ + unsigned int nb_chapters; + AVChapter **chapters; + + /** + * Metadata that applies to the whole file. + * + * - demuxing: set by libavformat in avformat_open_input() + * - muxing: may be set by the caller before avformat_write_header() + * + * Freed by libavformat in avformat_free_context(). + */ + AVDictionary *metadata; + + /** + * Start time of the stream in real world time, in microseconds + * since the Unix epoch (00:00 1st January 1970). That is, pts=0 in the + * stream was captured at this real world time. + * - muxing: Set by the caller before avformat_write_header(). If set to + * either 0 or AV_NOPTS_VALUE, then the current wall-time will + * be used. + * - demuxing: Set by libavformat. AV_NOPTS_VALUE if unknown. Note that + * the value may become known after some number of frames + * have been received. + */ + int64_t start_time_realtime; + + /** + * The number of frames used for determining the framerate in + * avformat_find_stream_info(). + * Demuxing only, set by the caller before avformat_find_stream_info(). + */ + int fps_probe_size; + + /** + * Error recognition; higher values will detect more errors but may + * misdetect some more or less valid parts as errors. + * Demuxing only, set by the caller before avformat_open_input(). 
+ */ + int error_recognition; + + /** + * Custom interrupt callbacks for the I/O layer. + * + * demuxing: set by the user before avformat_open_input(). + * muxing: set by the user before avformat_write_header() + * (mainly useful for AVFMT_NOFILE formats). The callback + * should also be passed to avio_open2() if it's used to + * open the file. + */ + AVIOInterruptCB interrupt_callback; + + /** + * Flags to enable debugging. + */ + int debug; +#define FF_FDEBUG_TS 0x0001 + + /** + * Maximum buffering duration for interleaving. + * + * To ensure all the streams are interleaved correctly, + * av_interleaved_write_frame() will wait until it has at least one packet + * for each stream before actually writing any packets to the output file. + * When some streams are "sparse" (i.e. there are large gaps between + * successive packets), this can result in excessive buffering. + * + * This field specifies the maximum difference between the timestamps of the + * first and the last packet in the muxing queue, above which libavformat + * will output a packet regardless of whether it has queued a packet for all + * the streams. + * + * Muxing only, set by the caller before avformat_write_header(). + */ + int64_t max_interleave_delta; + + /** + * Allow non-standard and experimental extension + * @see AVCodecContext.strict_std_compliance + */ + int strict_std_compliance; + + /** + * Flags for the user to detect events happening on the file. Flags must + * be cleared by the user once the event has been handled. + * A combination of AVFMT_EVENT_FLAG_*. + */ + int event_flags; +#define AVFMT_EVENT_FLAG_METADATA_UPDATED 0x0001 ///< The call resulted in updated metadata. + + /** + * Maximum number of packets to read while waiting for the first timestamp. + * Decoding only. + */ + int max_ts_probe; + + /** + * Avoid negative timestamps during muxing. + * Any value of the AVFMT_AVOID_NEG_TS_* constants. + * Note, this only works when using av_interleaved_write_frame. 
(interleave_packet_per_dts is in use) + * - muxing: Set by user + * - demuxing: unused + */ + int avoid_negative_ts; +#define AVFMT_AVOID_NEG_TS_AUTO -1 ///< Enabled when required by target format +#define AVFMT_AVOID_NEG_TS_MAKE_NON_NEGATIVE 1 ///< Shift timestamps so they are non negative +#define AVFMT_AVOID_NEG_TS_MAKE_ZERO 2 ///< Shift timestamps so that they start at 0 + + /** + * Transport stream id. + * This will be moved into demuxer private options. Thus no API/ABI compatibility + */ + int ts_id; + + /** + * Audio preload in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int audio_preload; + + /** + * Max chunk time in microseconds. + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_duration; + + /** + * Max chunk size in bytes + * Note, not all formats support this and unpredictable things may happen if it is used when not supported. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int max_chunk_size; + + /** + * forces the use of wallclock timestamps as pts/dts of packets + * This has undefined results in the presence of B frames. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int use_wallclock_as_timestamps; + + /** + * avio flags, used to force AVIO_FLAG_DIRECT. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int avio_flags; + + /** + * The duration field can be estimated through various ways, and this field can be used + * to know how the duration was estimated. 
+ * - encoding: unused + * - decoding: Read by user via AVOptions (NO direct access) + */ + enum AVDurationEstimationMethod duration_estimation_method; + + /** + * Skip initial bytes when opening stream + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int64_t skip_initial_bytes; + + /** + * Correct single timestamp overflows + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + unsigned int correct_ts_overflow; + + /** + * Force seeking to any (also non key) frames. + * - encoding: unused + * - decoding: Set by user via AVOptions (NO direct access) + */ + int seek2any; + + /** + * Flush the I/O context after each packet. + * - encoding: Set by user via AVOptions (NO direct access) + * - decoding: unused + */ + int flush_packets; + + /** + * format probing score. + * The maximal score is AVPROBE_SCORE_MAX, its set when the demuxer probes + * the format. + * - encoding: unused + * - decoding: set by avformat, read by user via av_format_get_probe_score() (NO direct access) + */ + int probe_score; + + /** + * number of bytes to read maximally to identify format. + * - encoding: unused + * - decoding: set by user through AVOPtions (NO direct access) + */ + int format_probesize; + + /** + * ',' separated list of allowed decoders. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user through AVOptions (NO direct access) + */ + char *codec_whitelist; + + /** + * ',' separated list of allowed demuxers. + * If NULL then all are allowed + * - encoding: unused + * - decoding: set by user through AVOptions (NO direct access) + */ + char *format_whitelist; + + /** + * An opaque field for libavformat internal usage. + * Must not be accessed in any way by callers. + */ + AVFormatInternal *internal; + + /** + * IO repositioned flag. + * This is set by avformat when the underlaying IO context read pointer + * is repositioned, for example when doing byte based seeking. 
+ * Demuxers can use the flag to detect such changes. + */ + int io_repositioned; + + /** + * Forced video codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_video_codec (NO direct access). + */ + AVCodec *video_codec; + + /** + * Forced audio codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_audio_codec (NO direct access). + */ + AVCodec *audio_codec; + + /** + * Forced subtitle codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_subtitle_codec (NO direct access). + */ + AVCodec *subtitle_codec; + + /** + * Forced data codec. + * This allows forcing a specific decoder, even when there are multiple with + * the same codec_id. + * Demuxing: Set by user via av_format_set_data_codec (NO direct access). + */ + AVCodec *data_codec; + + /** + * Number of bytes to be written as padding in a metadata header. + * Demuxing: Unused. + * Muxing: Set by user via av_format_set_metadata_header_padding. + */ + int metadata_header_padding; + + /** + * User data. + * This is a place for some private data of the user. + */ + void *opaque; + + /** + * Callback used by devices to communicate with application. + */ + av_format_control_message control_message_cb; + + /** + * Output timestamp offset, in microseconds. + * Muxing: set by user via AVOptions (NO direct access) + */ + int64_t output_ts_offset; + + /** + * dump format separator. + * can be ", " or "\n " or anything else + * Code outside libavformat should access this field using AVOptions + * (NO direct access). + * - muxing: Set by user. + * - demuxing: Set by user. + */ + uint8_t *dump_separator; + + /** + * Forced Data codec_id. + * Demuxing: Set by user. 
+ */ + enum AVCodecID data_codec_id; + +#if FF_API_OLD_OPEN_CALLBACKS + /** + * Called to open further IO contexts when needed for demuxing. + * + * This can be set by the user application to perform security checks on + * the URLs before opening them. + * The function should behave like avio_open2(), AVFormatContext is provided + * as contextual information and to reach AVFormatContext.opaque. + * + * If NULL then some simple checks are used together with avio_open2(). + * + * Must not be accessed directly from outside avformat. + * @See av_format_set_open_cb() + * + * Demuxing: Set by user. + * + * @deprecated Use io_open and io_close. + */ + attribute_deprecated + int (*open_cb)(struct AVFormatContext *s, AVIOContext **p, const char *url, int flags, const AVIOInterruptCB *int_cb, AVDictionary **options); +#endif + + /** + * ',' separated list of allowed protocols. + * - encoding: unused + * - decoding: set by user through AVOptions (NO direct access) + */ + char *protocol_whitelist; + + /* + * A callback for opening new IO streams. + * + * Whenever a muxer or a demuxer needs to open an IO stream (typically from + * avformat_open_input() for demuxers, but for certain formats can happen at + * other times as well), it will call this callback to obtain an IO context. + * + * @param s the format context + * @param pb on success, the newly opened IO context should be returned here + * @param url the url to open + * @param flags a combination of AVIO_FLAG_* + * @param options a dictionary of additional options, with the same + * semantics as in avio_open2() + * @return 0 on success, a negative AVERROR code on failure + * + * @note Certain muxers and demuxers do nesting, i.e. they open one or more + * additional internal format contexts. Thus the AVFormatContext pointer + * passed to this callback may be different from the one facing the caller. + * It will, however, have the same 'opaque' field. 
+ */ + int (*io_open)(struct AVFormatContext *s, AVIOContext **pb, const char *url, + int flags, AVDictionary **options); + + /** + * A callback for closing the streams opened with AVFormatContext.io_open(). + */ + void (*io_close)(struct AVFormatContext *s, AVIOContext *pb); + + /** + * ',' separated list of disallowed protocols. + * - encoding: unused + * - decoding: set by user through AVOptions (NO direct access) + */ + char *protocol_blacklist; +} AVFormatContext; + +int av_format_get_probe_score(const AVFormatContext *s); +AVCodec * av_format_get_video_codec(const AVFormatContext *s); +void av_format_set_video_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_audio_codec(const AVFormatContext *s); +void av_format_set_audio_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_subtitle_codec(const AVFormatContext *s); +void av_format_set_subtitle_codec(AVFormatContext *s, AVCodec *c); +AVCodec * av_format_get_data_codec(const AVFormatContext *s); +void av_format_set_data_codec(AVFormatContext *s, AVCodec *c); +int av_format_get_metadata_header_padding(const AVFormatContext *s); +void av_format_set_metadata_header_padding(AVFormatContext *s, int c); +void * av_format_get_opaque(const AVFormatContext *s); +void av_format_set_opaque(AVFormatContext *s, void *opaque); +av_format_control_message av_format_get_control_message_cb(const AVFormatContext *s); +void av_format_set_control_message_cb(AVFormatContext *s, av_format_control_message callback); +#if FF_API_OLD_OPEN_CALLBACKS +attribute_deprecated AVOpenCallback av_format_get_open_cb(const AVFormatContext *s); +attribute_deprecated void av_format_set_open_cb(AVFormatContext *s, AVOpenCallback callback); +#endif + +/** + * This function will cause global side data to be injected in the next packet + * of each stream as well as after any subsequent seek. + */ +void av_format_inject_global_side_data(AVFormatContext *s); + +/** + * Returns the method used to set ctx->duration. 
+ * + * @return AVFMT_DURATION_FROM_PTS, AVFMT_DURATION_FROM_STREAM, or AVFMT_DURATION_FROM_BITRATE. + */ +enum AVDurationEstimationMethod av_fmt_ctx_get_duration_estimation_method(const AVFormatContext* ctx); + +typedef struct AVPacketList { + AVPacket pkt; + struct AVPacketList *next; +} AVPacketList; + + +/** + * @defgroup lavf_core Core functions + * @ingroup libavf + * + * Functions for querying libavformat capabilities, allocating core structures, + * etc. + * @{ + */ + +/** + * Return the LIBAVFORMAT_VERSION_INT constant. + */ +unsigned avformat_version(void); + +/** + * Return the libavformat build-time configuration. + */ +const char *avformat_configuration(void); + +/** + * Return the libavformat license. + */ +const char *avformat_license(void); + +/** + * Initialize libavformat and register all the muxers, demuxers and + * protocols. If you do not call this function, then you can select + * exactly which formats you want to support. + * + * @see av_register_input_format() + * @see av_register_output_format() + */ +void av_register_all(void); + +void av_register_input_format(AVInputFormat *format); +void av_register_output_format(AVOutputFormat *format); + +/** + * Do global initialization of network components. This is optional, + * but recommended, since it avoids the overhead of implicitly + * doing the setup for each session. + * + * Calling this function will become mandatory if using network + * protocols at some major version bump. + */ +int avformat_network_init(void); + +/** + * Undo the initialization done by avformat_network_init. + */ +int avformat_network_deinit(void); + +/** + * If f is NULL, returns the first registered input format, + * if f is non-NULL, returns the next registered input format after f + * or NULL if f is the last one. 
+ */ +AVInputFormat *av_iformat_next(const AVInputFormat *f); + +/** + * If f is NULL, returns the first registered output format, + * if f is non-NULL, returns the next registered output format after f + * or NULL if f is the last one. + */ +AVOutputFormat *av_oformat_next(const AVOutputFormat *f); + +/** + * Allocate an AVFormatContext. + * avformat_free_context() can be used to free the context and everything + * allocated by the framework within it. + */ +AVFormatContext *avformat_alloc_context(void); + +/** + * Free an AVFormatContext and all its streams. + * @param s context to free + */ +void avformat_free_context(AVFormatContext *s); + +/** + * Get the AVClass for AVFormatContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). + */ +const AVClass *avformat_get_class(void); + +/** + * Add a new stream to a media file. + * + * When demuxing, it is called by the demuxer in read_header(). If the + * flag AVFMTCTX_NOHEADER is set in s.ctx_flags, then it may also + * be called in read_packet(). + * + * When muxing, should be called by the user before avformat_write_header(). + * + * User is required to call avcodec_close() and avformat_free_context() to + * clean up the allocation by avformat_new_stream(). + * + * @param s media file handle + * @param c If non-NULL, the AVCodecContext corresponding to the new stream + * will be initialized to use this codec. This is needed for e.g. codec-specific + * defaults to be set, so codec should be provided if it is known. + * + * @return newly created stream or NULL on error. + */ +AVStream *avformat_new_stream(AVFormatContext *s, const AVCodec *c); + +/** + * Allocate new information from stream. 
+ * + * @param stream stream + * @param type desired side information type + * @param size side information size + * @return pointer to fresh allocated data or NULL otherwise + */ +uint8_t *av_stream_new_side_data(AVStream *stream, + enum AVPacketSideDataType type, int size); +/** + * Get side information from stream. + * + * @param stream stream + * @param type desired side information type + * @param size pointer for side information size to store (optional) + * @return pointer to data if present or NULL otherwise + */ +uint8_t *av_stream_get_side_data(AVStream *stream, + enum AVPacketSideDataType type, int *size); + +AVProgram *av_new_program(AVFormatContext *s, int id); + +/** + * @} + */ + + +/** + * Allocate an AVFormatContext for an output format. + * avformat_free_context() can be used to free the context and + * everything allocated by the framework within it. + * + * @param *ctx is set to the created format context, or to NULL in + * case of failure + * @param oformat format to use for allocating the context, if NULL + * format_name and filename are used instead + * @param format_name the name of output format to use for allocating the + * context, if NULL filename is used instead + * @param filename the name of the filename to use for allocating the + * context, may be NULL + * @return >= 0 in case of success, a negative AVERROR code in case of + * failure + */ +int avformat_alloc_output_context2(AVFormatContext **ctx, AVOutputFormat *oformat, + const char *format_name, const char *filename); + +/** + * @addtogroup lavf_decoding + * @{ + */ + +/** + * Find AVInputFormat based on the short name of the input format. + */ +AVInputFormat *av_find_input_format(const char *short_name); + +/** + * Guess the file format. + * + * @param pd data to be probed + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. 
+ */ +AVInputFormat *av_probe_input_format(AVProbeData *pd, int is_opened); + +/** + * Guess the file format. + * + * @param pd data to be probed + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_max A probe score larger that this is required to accept a + * detection, the variable is set to the actual detection + * score afterwards. + * If the score is <= AVPROBE_SCORE_MAX / 4 it is recommended + * to retry with a larger probe buffer. + */ +AVInputFormat *av_probe_input_format2(AVProbeData *pd, int is_opened, int *score_max); + +/** + * Guess the file format. + * + * @param is_opened Whether the file is already opened; determines whether + * demuxers with or without AVFMT_NOFILE are probed. + * @param score_ret The score of the best detection. + */ +AVInputFormat *av_probe_input_format3(AVProbeData *pd, int is_opened, int *score_ret); + +/** + * Probe a bytestream to determine the input format. Each time a probe returns + * with a score that is too low, the probe buffer size is increased and another + * attempt is made. When the maximum probe size is reached, the input format + * with the highest score is returned. 
+ * + * @param pb the bytestream to probe + * @param fmt the input format is put here + * @param url the url of the stream + * @param logctx the log context + * @param offset the offset within the bytestream to probe from + * @param max_probe_size the maximum probe buffer size (zero for default) + * @return the score in case of success, a negative value corresponding to an + * the maximal score is AVPROBE_SCORE_MAX + * AVERROR code otherwise + */ +int av_probe_input_buffer2(AVIOContext *pb, AVInputFormat **fmt, + const char *url, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Like av_probe_input_buffer2() but returns 0 on success + */ +int av_probe_input_buffer(AVIOContext *pb, AVInputFormat **fmt, + const char *url, void *logctx, + unsigned int offset, unsigned int max_probe_size); + +/** + * Open an input stream and read the header. The codecs are not opened. + * The stream must be closed with avformat_close_input(). + * + * @param ps Pointer to user-supplied AVFormatContext (allocated by avformat_alloc_context). + * May be a pointer to NULL, in which case an AVFormatContext is allocated by this + * function and written into ps. + * Note that a user-supplied AVFormatContext will be freed on failure. + * @param url URL of the stream to open. + * @param fmt If non-NULL, this parameter forces a specific input format. + * Otherwise the format is autodetected. + * @param options A dictionary filled with AVFormatContext and demuxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, a negative AVERROR on failure. + * + * @note If you want to use custom IO, preallocate the format context and set its pb field. 
+ */ +int avformat_open_input(AVFormatContext **ps, const char *url, AVInputFormat *fmt, AVDictionary **options); + +attribute_deprecated +int av_demuxer_open(AVFormatContext *ic); + +/** + * Read packets of a media file to get stream information. This + * is useful for file formats with no headers such as MPEG. This + * function also computes the real framerate in case of MPEG-2 repeat + * frame mode. + * The logical file position is not changed by this function; + * examined packets may be buffered for later processing. + * + * @param ic media file handle + * @param options If non-NULL, an ic.nb_streams long array of pointers to + * dictionaries, where i-th member contains options for + * codec corresponding to i-th stream. + * On return each dictionary will be filled with options that were not found. + * @return >=0 if OK, AVERROR_xxx on error + * + * @note this function isn't guaranteed to open all the codecs, so + * options being non-empty at return is a perfectly normal behavior. + * + * @todo Let the user decide somehow what information is needed so that + * we do not waste time getting stuff the user does not need. + */ +int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options); + +/** + * Find the programs which belong to a given stream. + * + * @param ic media file handle + * @param last the last found program, the search will start after this + * program, or from the beginning if it is NULL + * @param s stream index + * @return the next program which belongs to s, NULL if no program is found or + * the last program is not among the programs of ic. + */ +AVProgram *av_find_program_from_stream(AVFormatContext *ic, AVProgram *last, int s); + +void av_program_add_stream_index(AVFormatContext *ac, int progid, unsigned int idx); + +/** + * Find the "best" stream in the file. + * The best stream is determined according to various heuristics as the most + * likely to be what the user expects. 
+ * If the decoder parameter is non-NULL, av_find_best_stream will find the + * default decoder for the stream's codec; streams for which no decoder can + * be found are ignored. + * + * @param ic media file handle + * @param type stream type: video, audio, subtitles, etc. + * @param wanted_stream_nb user-requested stream number, + * or -1 for automatic selection + * @param related_stream try to find a stream related (eg. in the same + * program) to this one, or -1 if none + * @param decoder_ret if non-NULL, returns the decoder for the + * selected stream + * @param flags flags; none are currently defined + * @return the non-negative stream number in case of success, + * AVERROR_STREAM_NOT_FOUND if no stream with the requested type + * could be found, + * AVERROR_DECODER_NOT_FOUND if streams were found but no decoder + * @note If av_find_best_stream returns successfully and decoder_ret is not + * NULL, then *decoder_ret is guaranteed to be set to a valid AVCodec. + */ +int av_find_best_stream(AVFormatContext *ic, + enum AVMediaType type, + int wanted_stream_nb, + int related_stream, + AVCodec **decoder_ret, + int flags); + +/** + * Return the next frame of a stream. + * This function returns what is stored in the file, and does not validate + * that what is there are valid frames for the decoder. It will split what is + * stored in the file into frames and return one for each call. It will not + * omit invalid data between valid frames so as to give the decoder the maximum + * information possible for decoding. + * + * If pkt->buf is NULL, then the packet is valid until the next + * av_read_frame() or until avformat_close_input(). Otherwise the packet + * is valid indefinitely. In both cases the packet must be freed with + * av_packet_unref when it is no longer needed. For video, the packet contains + * exactly one frame. For audio, it contains an integer number of frames if each + * frame has a known fixed size (e.g. PCM or ADPCM data). 
If the audio frames + * have a variable size (e.g. MPEG audio), then it contains one frame. + * + * pkt->pts, pkt->dts and pkt->duration are always set to correct + * values in AVStream.time_base units (and guessed if the format cannot + * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format + * has B-frames, so it is better to rely on pkt->dts if you do not + * decompress the payload. + * + * @return 0 if OK, < 0 on error or end of file + */ +int av_read_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Seek to the keyframe at timestamp. + * 'timestamp' in 'stream_index'. + * + * @param s media file handle + * @param stream_index If stream_index is (-1), a default + * stream is selected, and timestamp is automatically converted + * from AV_TIME_BASE units to the stream specific time_base. + * @param timestamp Timestamp in AVStream.time_base units + * or, if no stream is specified, in AV_TIME_BASE units. + * @param flags flags which select direction and seeking mode + * @return >= 0 on success + */ +int av_seek_frame(AVFormatContext *s, int stream_index, int64_t timestamp, + int flags); + +/** + * Seek to timestamp ts. + * Seeking will be done so that the point from which all active streams + * can be presented successfully will be closest to ts and within min/max_ts. + * Active streams are all streams that have AVStream.discard < AVDISCARD_ALL. + * + * If flags contain AVSEEK_FLAG_BYTE, then all timestamps are in bytes and + * are the file position (this may not be supported by all demuxers). + * If flags contain AVSEEK_FLAG_FRAME, then all timestamps are in frames + * in the stream with stream_index (this may not be supported by all demuxers). + * Otherwise all timestamps are in units of the stream selected by stream_index + * or if stream_index is -1, in AV_TIME_BASE units. + * If flags contain AVSEEK_FLAG_ANY, then non-keyframes are treated as + * keyframes (this may not be supported by all demuxers). 
+ * If flags contain AVSEEK_FLAG_BACKWARD, it is ignored. + * + * @param s media file handle + * @param stream_index index of the stream which is used as time base reference + * @param min_ts smallest acceptable timestamp + * @param ts target timestamp + * @param max_ts largest acceptable timestamp + * @param flags flags + * @return >=0 on success, error code otherwise + * + * @note This is part of the new seek API which is still under construction. + * Thus do not use this yet. It may change at any time, do not expect + * ABI compatibility yet! + */ +int avformat_seek_file(AVFormatContext *s, int stream_index, int64_t min_ts, int64_t ts, int64_t max_ts, int flags); + +/** + * Discard all internally buffered data. This can be useful when dealing with + * discontinuities in the byte stream. Generally works only with formats that + * can resync. This includes headerless formats like MPEG-TS/TS but should also + * work with NUT, Ogg and in a limited way AVI for example. + * + * The set of streams, the detected duration, stream parameters and codecs do + * not change when calling this function. If you want a complete reset, it's + * better to open a new AVFormatContext. + * + * This does not flush the AVIOContext (s->pb). If necessary, call + * avio_flush(s->pb) before calling this function. + * + * @param s media file handle + * @return >=0 on success, error code otherwise + */ +int avformat_flush(AVFormatContext *s); + +/** + * Start playing a network-based stream (e.g. RTSP stream) at the + * current position. + */ +int av_read_play(AVFormatContext *s); + +/** + * Pause a network-based stream (e.g. RTSP stream). + * + * Use av_read_play() to resume it. + */ +int av_read_pause(AVFormatContext *s); + +/** + * Close an opened input AVFormatContext. Free it and all its contents + * and set *s to NULL. 
+ */ +void avformat_close_input(AVFormatContext **s); +/** + * @} + */ + +#define AVSEEK_FLAG_BACKWARD 1 ///< seek backward +#define AVSEEK_FLAG_BYTE 2 ///< seeking based on position in bytes +#define AVSEEK_FLAG_ANY 4 ///< seek to any frame, even non-keyframes +#define AVSEEK_FLAG_FRAME 8 ///< seeking based on frame number + +/** + * @addtogroup lavf_encoding + * @{ + */ +/** + * Allocate the stream private data and write the stream header to + * an output media file. + * + * @param s Media file handle, must be allocated with avformat_alloc_context(). + * Its oformat field must be set to the desired output format; + * Its pb field must be set to an already opened AVIOContext. + * @param options An AVDictionary filled with AVFormatContext and muxer-private options. + * On return this parameter will be destroyed and replaced with a dict containing + * options that were not found. May be NULL. + * + * @return 0 on success, negative AVERROR on failure. + * + * @see av_opt_find, av_dict_set, avio_open, av_oformat_next. + */ +av_warn_unused_result +int avformat_write_header(AVFormatContext *s, AVDictionary **options); + +/** + * Write a packet to an output media file. + * + * This function passes the packet directly to the muxer, without any buffering + * or reordering. The caller is responsible for correctly interleaving the + * packets if the format requires it. Callers that want libavformat to handle + * the interleaving should call av_interleaved_write_frame() instead of this + * function. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. Note that unlike + * av_interleaved_write_frame(), this function does not take + * ownership of the packet passed to it (though some muxers may make + * an internal reference to the input packet). + *
+ * This parameter can be NULL (at any time, not just at the end), in + * order to immediately flush data buffered within the muxer, for + * muxers that buffer up data internally before writing it to the + * output. + *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + *
+ * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets passed to this function must be strictly + * increasing when compared in their respective timebases (unless the + * output format is flagged with the AVFMT_TS_NONSTRICT, then they + * merely have to be nondecreasing). @ref AVPacket.duration + * "duration") should also be set if known. + * @return < 0 on error, = 0 if OK, 1 if flushed and there is no more data to flush + * + * @see av_interleaved_write_frame() + */ +int av_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write a packet to an output media file ensuring correct interleaving. + * + * This function will buffer the packets internally as needed to make sure the + * packets in the output file are properly interleaved in the order of + * increasing dts. Callers doing their own interleaving should call + * av_write_frame() instead of this function. + * + * Using this function instead of av_write_frame() can give muxers advance + * knowledge of future packets, improving e.g. the behaviour of the mp4 + * muxer for VFR content in fragmenting mode. + * + * @param s media file handle + * @param pkt The packet containing the data to be written. + *
+ * If the packet is reference-counted, this function will take + * ownership of this reference and unreference it later when it sees + * fit. + * The caller must not access the data through this reference after + * this function returns. If the packet is not reference-counted, + * libavformat will make a copy. + *
+ * This parameter can be NULL (at any time, not just at the end), to + * flush the interleaving queues. + *
+ * Packet's @ref AVPacket.stream_index "stream_index" field must be + * set to the index of the corresponding stream in @ref + * AVFormatContext.streams "s->streams". + *
+ * The timestamps (@ref AVPacket.pts "pts", @ref AVPacket.dts "dts") + * must be set to correct values in the stream's timebase (unless the + * output format is flagged with the AVFMT_NOTIMESTAMPS flag, then + * they can be set to AV_NOPTS_VALUE). + * The dts for subsequent packets in one stream must be strictly + * increasing (unless the output format is flagged with the + * AVFMT_TS_NONSTRICT, then they merely have to be nondecreasing). + * @ref AVPacket.duration "duration") should also be set if known. + * + * @return 0 on success, a negative AVERROR on error. Libavformat will always + * take care of freeing the packet, even if this function fails. + * + * @see av_write_frame(), AVFormatContext.max_interleave_delta + */ +int av_interleaved_write_frame(AVFormatContext *s, AVPacket *pkt); + +/** + * Write an uncoded frame to an output media file. + * + * The frame must be correctly interleaved according to the container + * specification; if not, then av_interleaved_write_frame() must be used. + * + * See av_interleaved_write_frame() for details. + */ +int av_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Write an uncoded frame to an output media file. + * + * If the muxer supports it, this function makes it possible to write an AVFrame + * structure directly, without encoding it into a packet. + * It is mostly useful for devices and similar special muxers that use raw + * video or PCM data and will not serialize it into a byte stream. + * + * To test whether it is possible to use it with a given muxer and stream, + * use av_write_uncoded_frame_query(). + * + * The caller gives up ownership of the frame and must not access it + * afterwards. + * + * @return >=0 for success, a negative code on error + */ +int av_interleaved_write_uncoded_frame(AVFormatContext *s, int stream_index, + AVFrame *frame); + +/** + * Test whether a muxer supports uncoded frame. 
+ * + * @return >=0 if an uncoded frame can be written to that muxer and stream, + * <0 if not + */ +int av_write_uncoded_frame_query(AVFormatContext *s, int stream_index); + +/** + * Write the stream trailer to an output media file and free the + * file private data. + * + * May only be called after a successful call to avformat_write_header. + * + * @param s media file handle + * @return 0 if OK, AVERROR_xxx on error + */ +int av_write_trailer(AVFormatContext *s); + +/** + * Return the output format in the list of registered output formats + * which best matches the provided parameters, or return NULL if + * there is no match. + * + * @param short_name if non-NULL checks if short_name matches with the + * names of the registered formats + * @param filename if non-NULL checks if filename terminates with the + * extensions of the registered formats + * @param mime_type if non-NULL checks if mime_type matches with the + * MIME type of the registered formats + */ +AVOutputFormat *av_guess_format(const char *short_name, + const char *filename, + const char *mime_type); + +/** + * Guess the codec ID based upon muxer and filename. + */ +enum AVCodecID av_guess_codec(AVOutputFormat *fmt, const char *short_name, + const char *filename, const char *mime_type, + enum AVMediaType type); + +/** + * Get timing information for the data currently output. + * The exact meaning of "currently output" depends on the format. + * It is mostly relevant for devices that have an internal buffer and/or + * work in real time. + * @param s media file handle + * @param stream stream in the media file + * @param[out] dts DTS of the last packet output for the stream, in stream + * time_base units + * @param[out] wall absolute time when that packet whas output, + * in microsecond + * @return 0 if OK, AVERROR(ENOSYS) if the format does not support it + * Note: some formats or devices may not allow to measure dts and wall + * atomically. 
+ */ +int av_get_output_timestamp(struct AVFormatContext *s, int stream, + int64_t *dts, int64_t *wall); + + +/** + * @} + */ + + +/** + * @defgroup lavf_misc Utility functions + * @ingroup libavf + * @{ + * + * Miscellaneous utility functions related to both muxing and demuxing + * (or neither). + */ + +/** + * Send a nice hexadecimal dump of a buffer to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump_log, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump(FILE *f, const uint8_t *buf, int size); + +/** + * Send a nice hexadecimal dump of a buffer to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param buf buffer + * @param size buffer size + * + * @see av_hex_dump, av_pkt_dump2, av_pkt_dump_log2 + */ +void av_hex_dump_log(void *avcl, int level, const uint8_t *buf, int size); + +/** + * Send a nice dump of a packet to the specified file stream. + * + * @param f The file stream pointer where the dump should be sent to. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. + * @param st AVStream that the packet belongs to + */ +void av_pkt_dump2(FILE *f, const AVPacket *pkt, int dump_payload, const AVStream *st); + + +/** + * Send a nice dump of a packet to the log. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message, lower values signifying + * higher importance. + * @param pkt packet to dump + * @param dump_payload True if the payload must be displayed, too. 
+ * @param st AVStream that the packet belongs to + */ +void av_pkt_dump_log2(void *avcl, int level, const AVPacket *pkt, int dump_payload, + const AVStream *st); + +/** + * Get the AVCodecID for the given codec tag tag. + * If no codec id is found returns AV_CODEC_ID_NONE. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param tag codec tag to match to a codec ID + */ +enum AVCodecID av_codec_get_id(const struct AVCodecTag * const *tags, unsigned int tag); + +/** + * Get the codec tag for the given codec id id. + * If no codec tag is found returns 0. + * + * @param tags list of supported codec_id-codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec ID to match to a codec tag + */ +unsigned int av_codec_get_tag(const struct AVCodecTag * const *tags, enum AVCodecID id); + +/** + * Get the codec tag for the given codec id. + * + * @param tags list of supported codec_id - codec_tag pairs, as stored + * in AVInputFormat.codec_tag and AVOutputFormat.codec_tag + * @param id codec id that should be searched for in the list + * @param tag A pointer to the found tag + * @return 0 if id was not found in tags, > 0 if it was found + */ +int av_codec_get_tag2(const struct AVCodecTag * const *tags, enum AVCodecID id, + unsigned int *tag); + +int av_find_default_stream_index(AVFormatContext *s); + +/** + * Get the index for a specific timestamp. 
+ * + * @param st stream that the timestamp belongs to + * @param timestamp timestamp to retrieve the index for + * @param flags if AVSEEK_FLAG_BACKWARD then the returned index will correspond + * to the timestamp which is <= the requested one, if backward + * is 0, then it will be >= + * if AVSEEK_FLAG_ANY seek to any frame, only keyframes otherwise + * @return < 0 if no such timestamp could be found + */ +int av_index_search_timestamp(AVStream *st, int64_t timestamp, int flags); + +/** + * Add an index entry into a sorted list. Update the entry if the list + * already contains it. + * + * @param timestamp timestamp in the time base of the given stream + */ +int av_add_index_entry(AVStream *st, int64_t pos, int64_t timestamp, + int size, int distance, int flags); + + +/** + * Split a URL string into components. + * + * The pointers to buffers for storing individual components may be null, + * in order to ignore that component. Buffers for components not found are + * set to empty strings. If the port is not found, it is set to a negative + * value. + * + * @param proto the buffer for the protocol + * @param proto_size the size of the proto buffer + * @param authorization the buffer for the authorization + * @param authorization_size the size of the authorization buffer + * @param hostname the buffer for the host name + * @param hostname_size the size of the hostname buffer + * @param port_ptr a pointer to store the port number in + * @param path the buffer for the path + * @param path_size the size of the path buffer + * @param url the URL to split + */ +void av_url_split(char *proto, int proto_size, + char *authorization, int authorization_size, + char *hostname, int hostname_size, + int *port_ptr, + char *path, int path_size, + const char *url); + + +/** + * Print detailed information about the input or output format, such as + * duration, bitrate, streams, container, programs, metadata, side data, + * codec and time base. 
+ * + * @param ic the context to analyze + * @param index index of the stream to dump information about + * @param url the URL to print, such as source or destination file + * @param is_output Select whether the specified context is an input(0) or output(1) + */ +void av_dump_format(AVFormatContext *ic, + int index, + const char *url, + int is_output); + +/** + * Return in 'buf' the path with '%d' replaced by a number. + * + * Also handles the '%0nd' format where 'n' is the total number + * of digits and '%%'. + * + * @param buf destination buffer + * @param buf_size destination buffer size + * @param path numbered sequence string + * @param number frame number + * @return 0 if OK, -1 on format error + */ +int av_get_frame_filename(char *buf, int buf_size, + const char *path, int number); + +/** + * Check whether filename actually is a numbered sequence generator. + * + * @param filename possible numbered sequence string + * @return 1 if a valid numbered sequence string, 0 otherwise + */ +int av_filename_number_test(const char *filename); + +/** + * Generate an SDP for an RTP session. + * + * Note, this overwrites the id values of AVStreams in the muxer contexts + * for getting unique dynamic payload types. + * + * @param ac array of AVFormatContexts describing the RTP streams. If the + * array is composed by only one context, such context can contain + * multiple AVStreams (one AVStream per RTP stream). Otherwise, + * all the contexts in the array (an AVCodecContext per RTP stream) + * must contain only one AVStream. + * @param n_files number of AVCodecContexts contained in ac + * @param buf buffer where the SDP will be stored (must be allocated by + * the caller) + * @param size the size of the buffer + * @return 0 if OK, AVERROR_xxx on error + */ +int av_sdp_create(AVFormatContext *ac[], int n_files, char *buf, int size); + +/** + * Return a positive value if the given filename has one of the given + * extensions, 0 otherwise. 
+ * + * @param filename file name to check against the given extensions + * @param extensions a comma-separated list of filename extensions + */ +int av_match_ext(const char *filename, const char *extensions); + +/** + * Test if the given container can store a codec. + * + * @param ofmt container to check for compatibility + * @param codec_id codec to potentially store in container + * @param std_compliance standards compliance level, one of FF_COMPLIANCE_* + * + * @return 1 if codec with ID codec_id can be stored in ofmt, 0 if it cannot. + * A negative number if this information is not available. + */ +int avformat_query_codec(const AVOutputFormat *ofmt, enum AVCodecID codec_id, + int std_compliance); + +/** + * @defgroup riff_fourcc RIFF FourCCs + * @{ + * Get the tables mapping RIFF FourCCs to libavcodec AVCodecIDs. The tables are + * meant to be passed to av_codec_get_id()/av_codec_get_tag() as in the + * following code: + * @code + * uint32_t tag = MKTAG('H', '2', '6', '4'); + * const struct AVCodecTag *table[] = { avformat_get_riff_video_tags(), 0 }; + * enum AVCodecID id = av_codec_get_id(table, tag); + * @endcode + */ +/** + * @return the table mapping RIFF FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_video_tags(void); +/** + * @return the table mapping RIFF FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_riff_audio_tags(void); +/** + * @return the table mapping MOV FourCCs for video to libavcodec AVCodecID. + */ +const struct AVCodecTag *avformat_get_mov_video_tags(void); +/** + * @return the table mapping MOV FourCCs for audio to AVCodecID. + */ +const struct AVCodecTag *avformat_get_mov_audio_tags(void); + +/** + * @} + */ + +/** + * Guess the sample aspect ratio of a frame, based on both the stream and the + * frame aspect ratio. + * + * Since the frame aspect ratio is set by the codec but the stream aspect ratio + * is set by the demuxer, these two may not be equal. 
This function tries to + * return the value that you should use if you would like to display the frame. + * + * Basic logic is to use the stream aspect ratio if it is set to something sane + * otherwise use the frame aspect ratio. This way a container setting, which is + * usually easy to modify can override the coded value in the frames. + * + * @param format the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame with the aspect ratio to be determined + * @return the guessed (valid) sample_aspect_ratio, 0/1 if no idea + */ +AVRational av_guess_sample_aspect_ratio(AVFormatContext *format, AVStream *stream, AVFrame *frame); + +/** + * Guess the frame rate, based on both the container and codec information. + * + * @param ctx the format context which the stream is part of + * @param stream the stream which the frame is part of + * @param frame the frame for which the frame rate should be determined, may be NULL + * @return the guessed (valid) frame rate, 0/1 if no idea + */ +AVRational av_guess_frame_rate(AVFormatContext *ctx, AVStream *stream, AVFrame *frame); + +/** + * Check if the stream st contained in s is matched by the stream specifier + * spec. + * + * See the "stream specifiers" chapter in the documentation for the syntax + * of spec. + * + * @return >0 if st is matched by spec; + * 0 if st is not matched by spec; + * AVERROR code if spec is invalid + * + * @note A stream specifier can match several streams in the format. + */ +int avformat_match_stream_specifier(AVFormatContext *s, AVStream *st, + const char *spec); + +int avformat_queue_attached_pictures(AVFormatContext *s); + +/** + * Apply a list of bitstream filters to a packet. + * + * @param codec AVCodecContext, usually from an AVStream + * @param pkt the packet to apply filters to. 
If, on success, the returned + * packet has size == 0 and side_data_elems == 0, it indicates that + * the packet should be dropped + * @param bsfc a NULL-terminated list of filters to apply + * @return >=0 on success; + * AVERROR code on failure + */ +#if FF_API_OLD_BSF +attribute_deprecated +int av_apply_bitstream_filters(AVCodecContext *codec, AVPacket *pkt, + AVBitStreamFilterContext *bsfc); +#endif + +/** + * @} + */ + +#endif /* AVFORMAT_AVFORMAT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavformat/avio.h b/third-party/FFmpeg-iOS/include/libavformat/avio.h new file mode 100644 index 0000000000..889e0a9abc --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavformat/avio.h @@ -0,0 +1,801 @@ +/* + * copyright (c) 2001 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ +#ifndef AVFORMAT_AVIO_H +#define AVFORMAT_AVIO_H + +/** + * @file + * @ingroup lavf_io + * Buffered I/O operations + */ + +#include + +#include "../libavutil/common.h" +#include "../libavutil/dict.h" +#include "../libavutil/log.h" + +#include "../libavformat/version.h" + +#define AVIO_SEEKABLE_NORMAL 0x0001 /**< Seeking works like for a local file */ + +/** + * Callback for checking whether to abort blocking functions. 
+ * AVERROR_EXIT is returned in this case by the interrupted + * function. During blocking operations, callback is called with + * opaque as parameter. If the callback returns 1, the + * blocking operation will be aborted. + * + * No members can be added to this struct without a major bump, if + * new elements have been added after this struct in AVFormatContext + * or AVIOContext. + */ +typedef struct AVIOInterruptCB { + int (*callback)(void*); + void *opaque; +} AVIOInterruptCB; + +/** + * Directory entry types. + */ +enum AVIODirEntryType { + AVIO_ENTRY_UNKNOWN, + AVIO_ENTRY_BLOCK_DEVICE, + AVIO_ENTRY_CHARACTER_DEVICE, + AVIO_ENTRY_DIRECTORY, + AVIO_ENTRY_NAMED_PIPE, + AVIO_ENTRY_SYMBOLIC_LINK, + AVIO_ENTRY_SOCKET, + AVIO_ENTRY_FILE, + AVIO_ENTRY_SERVER, + AVIO_ENTRY_SHARE, + AVIO_ENTRY_WORKGROUP, +}; + +/** + * Describes single entry of the directory. + * + * Only name and type fields are guaranteed be set. + * Rest of fields are protocol or/and platform dependent and might be unknown. + */ +typedef struct AVIODirEntry { + char *name; /**< Filename */ + int type; /**< Type of the entry */ + int utf8; /**< Set to 1 when name is encoded with UTF-8, 0 otherwise. + Name can be encoded with UTF-8 even though 0 is set. */ + int64_t size; /**< File size in bytes, -1 if unknown. */ + int64_t modification_timestamp; /**< Time of last modification in microseconds since unix + epoch, -1 if unknown. */ + int64_t access_timestamp; /**< Time of last access in microseconds since unix epoch, + -1 if unknown. */ + int64_t status_change_timestamp; /**< Time of last status change in microseconds since unix + epoch, -1 if unknown. */ + int64_t user_id; /**< User ID of owner, -1 if unknown. */ + int64_t group_id; /**< Group ID of owner, -1 if unknown. */ + int64_t filemode; /**< Unix file mode, -1 if unknown. 
*/ +} AVIODirEntry; + +typedef struct AVIODirContext { + struct URLContext *url_context; +} AVIODirContext; + +/** + * Different data types that can be returned via the AVIO + * write_data_type callback. + */ +enum AVIODataMarkerType { + /** + * Header data; this needs to be present for the stream to be decodeable. + */ + AVIO_DATA_MARKER_HEADER, + /** + * A point in the output bytestream where a decoder can start decoding + * (i.e. a keyframe). A demuxer/decoder given the data flagged with + * AVIO_DATA_MARKER_HEADER, followed by any AVIO_DATA_MARKER_SYNC_POINT, + * should give decodeable results. + */ + AVIO_DATA_MARKER_SYNC_POINT, + /** + * A point in the output bytestream where a demuxer can start parsing + * (for non self synchronizing bytestream formats). That is, any + * non-keyframe packet start point. + */ + AVIO_DATA_MARKER_BOUNDARY_POINT, + /** + * This is any, unlabelled data. It can either be a muxer not marking + * any positions at all, it can be an actual boundary/sync point + * that the muxer chooses not to mark, or a later part of a packet/fragment + * that is cut into multiple write callbacks due to limited IO buffer size. + */ + AVIO_DATA_MARKER_UNKNOWN, + /** + * Trailer data, which doesn't contain actual content, but only for + * finalizing the output file. + */ + AVIO_DATA_MARKER_TRAILER +}; + +/** + * Bytestream IO Context. + * New fields can be added to the end with minor version bumps. + * Removal, reordering and changes to existing fields require a major + * version bump. + * sizeof(AVIOContext) must not be used outside libav*. + * + * @note None of the function pointers in AVIOContext should be called + * directly, they should only be set by the client application + * when implementing custom I/O. Normally these are set to the + * function pointers specified in avio_alloc_context() + */ +typedef struct AVIOContext { + /** + * A class for private options. 
+ * + * If this AVIOContext is created by avio_open2(), av_class is set and + * passes the options down to protocols. + * + * If this AVIOContext is manually allocated, then av_class may be set by + * the caller. + * + * warning -- this field can be NULL, be sure to not pass this AVIOContext + * to any av_opt_* functions in that case. + */ + const AVClass *av_class; + + /* + * The following shows the relationship between buffer, buf_ptr, buf_end, buf_size, + * and pos, when reading and when writing (since AVIOContext is used for both): + * + ********************************************************************************** + * READING + ********************************************************************************** + * + * | buffer_size | + * |---------------------------------------| + * | | + * + * buffer buf_ptr buf_end + * +---------------+-----------------------+ + * |/ / / / / / / /|/ / / / / / /| | + * read buffer: |/ / consumed / | to be read /| | + * |/ / / / / / / /|/ / / / / / /| | + * +---------------+-----------------------+ + * + * pos + * +-------------------------------------------+-----------------+ + * input file: | | | + * +-------------------------------------------+-----------------+ + * + * + ********************************************************************************** + * WRITING + ********************************************************************************** + * + * | buffer_size | + * |-------------------------------| + * | | + * + * buffer buf_ptr buf_end + * +-------------------+-----------+ + * |/ / / / / / / / / /| | + * write buffer: | / to be flushed / | | + * |/ / / / / / / / / /| | + * +-------------------+-----------+ + * + * pos + * +--------------------------+-----------------------------------+ + * output file: | | | + * +--------------------------+-----------------------------------+ + * + */ + unsigned char *buffer; /**< Start of the buffer. 
*/ + int buffer_size; /**< Maximum buffer size */ + unsigned char *buf_ptr; /**< Current position in the buffer */ + unsigned char *buf_end; /**< End of the data, may be less than + buffer+buffer_size if the read function returned + less data than requested, e.g. for streams where + no more data has been received yet. */ + void *opaque; /**< A private pointer, passed to the read/write/seek/... + functions. */ + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size); + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size); + int64_t (*seek)(void *opaque, int64_t offset, int whence); + int64_t pos; /**< position in the file of the current buffer */ + int must_flush; /**< true if the next seek should flush */ + int eof_reached; /**< true if eof reached */ + int write_flag; /**< true if open for writing */ + int max_packet_size; + unsigned long checksum; + unsigned char *checksum_ptr; + unsigned long (*update_checksum)(unsigned long checksum, const uint8_t *buf, unsigned int size); + int error; /**< contains the error code or 0 if no error happened */ + /** + * Pause or resume playback for network streaming protocols - e.g. MMS. + */ + int (*read_pause)(void *opaque, int pause); + /** + * Seek to a given timestamp in stream with the specified stream_index. + * Needed for some network streaming protocols which don't support seeking + * to byte position. + */ + int64_t (*read_seek)(void *opaque, int stream_index, + int64_t timestamp, int flags); + /** + * A combination of AVIO_SEEKABLE_ flags or 0 when the stream is not seekable. + */ + int seekable; + + /** + * max filesize, used to limit allocations + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t maxsize; + + /** + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. 
+ */ + int direct; + + /** + * Bytes read statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int64_t bytes_read; + + /** + * seek statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int seek_count; + + /** + * writeout statistic + * This field is internal to libavformat and access from outside is not allowed. + */ + int writeout_count; + + /** + * Original buffer size + * used internally after probing and ensure seekback to reset the buffer size + * This field is internal to libavformat and access from outside is not allowed. + */ + int orig_buffer_size; + + /** + * Threshold to favor readahead over seek. + * This is current internal only, do not use from outside. + */ + int short_seek_threshold; + + /** + * ',' separated list of allowed protocols. + */ + const char *protocol_whitelist; + + /** + * ',' separated list of disallowed protocols. + */ + const char *protocol_blacklist; + + /** + * A callback that is used instead of write_packet. + */ + int (*write_data_type)(void *opaque, uint8_t *buf, int buf_size, + enum AVIODataMarkerType type, int64_t time); + /** + * If set, don't call write_data_type separately for AVIO_DATA_MARKER_BOUNDARY_POINT, + * but ignore them and treat them as AVIO_DATA_MARKER_UNKNOWN (to avoid needlessly + * small chunks of data returned from the callback). + */ + int ignore_boundary_point; + + /** + * Internal, not meant to be used from outside of AVIOContext. + */ + enum AVIODataMarkerType current_type; + int64_t last_time; +} AVIOContext; + +/** + * Return the name of the protocol that will handle the passed URL. + * + * NULL is returned if no protocol could be found for the given URL. + * + * @return Name of the protocol or NULL. 
+ */ +const char *avio_find_protocol_name(const char *url); + +/** + * Return AVIO_FLAG_* access flags corresponding to the access permissions + * of the resource in url, or a negative value corresponding to an + * AVERROR code in case of failure. The returned access flags are + * masked by the value in flags. + * + * @note This function is intrinsically unsafe, in the sense that the + * checked resource may change its existence or permission status from + * one call to another. Thus you should not trust the returned value, + * unless you are sure that no other processes are accessing the + * checked resource. + */ +int avio_check(const char *url, int flags); + +/** + * Move or rename a resource. + * + * @note url_src and url_dst should share the same protocol and authority. + * + * @param url_src url to resource to be moved + * @param url_dst new url to resource if the operation succeeded + * @return >=0 on success or negative on error. + */ +int avpriv_io_move(const char *url_src, const char *url_dst); + +/** + * Delete a resource. + * + * @param url resource to be deleted. + * @return >=0 on success or negative on error. + */ +int avpriv_io_delete(const char *url); + +/** + * Open directory for reading. + * + * @param s directory read context. Pointer to a NULL pointer must be passed. + * @param url directory to be listed. + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dictionary + * containing options that were not found. May be NULL. + * @return >=0 on success or negative on error. + */ +int avio_open_dir(AVIODirContext **s, const char *url, AVDictionary **options); + +/** + * Get next directory entry. + * + * Returned entry must be freed with avio_free_directory_entry(). In particular + * it may outlive AVIODirContext. + * + * @param s directory read context. + * @param[out] next next entry or NULL when no more entries. + * @return >=0 on success or negative on error. 
End of list is not considered an + * error. + */ +int avio_read_dir(AVIODirContext *s, AVIODirEntry **next); + +/** + * Close directory. + * + * @note Entries created using avio_read_dir() are not deleted and must be + * freeded with avio_free_directory_entry(). + * + * @param s directory read context. + * @return >=0 on success or negative on error. + */ +int avio_close_dir(AVIODirContext **s); + +/** + * Free entry allocated by avio_read_dir(). + * + * @param entry entry to be freed. + */ +void avio_free_directory_entry(AVIODirEntry **entry); + +/** + * Allocate and initialize an AVIOContext for buffered I/O. It must be later + * freed with av_free(). + * + * @param buffer Memory block for input/output operations via AVIOContext. + * The buffer must be allocated with av_malloc() and friends. + * It may be freed and replaced with a new buffer by libavformat. + * AVIOContext.buffer holds the buffer currently in use, + * which must be later freed with av_free(). + * @param buffer_size The buffer size is very important for performance. + * For protocols with fixed blocksize it should be set to this blocksize. + * For others a typical size is a cache page, e.g. 4kb. + * @param write_flag Set to 1 if the buffer should be writable, 0 otherwise. + * @param opaque An opaque pointer to user-specific data. + * @param read_packet A function for refilling the buffer, may be NULL. + * @param write_packet A function for writing the buffer contents, may be NULL. + * The function may not change the input buffers content. + * @param seek A function for seeking to specified byte position, may be NULL. + * + * @return Allocated AVIOContext or NULL on failure. 
+ */ +AVIOContext *avio_alloc_context( + unsigned char *buffer, + int buffer_size, + int write_flag, + void *opaque, + int (*read_packet)(void *opaque, uint8_t *buf, int buf_size), + int (*write_packet)(void *opaque, uint8_t *buf, int buf_size), + int64_t (*seek)(void *opaque, int64_t offset, int whence)); + +void avio_w8(AVIOContext *s, int b); +void avio_write(AVIOContext *s, const unsigned char *buf, int size); +void avio_wl64(AVIOContext *s, uint64_t val); +void avio_wb64(AVIOContext *s, uint64_t val); +void avio_wl32(AVIOContext *s, unsigned int val); +void avio_wb32(AVIOContext *s, unsigned int val); +void avio_wl24(AVIOContext *s, unsigned int val); +void avio_wb24(AVIOContext *s, unsigned int val); +void avio_wl16(AVIOContext *s, unsigned int val); +void avio_wb16(AVIOContext *s, unsigned int val); + +/** + * Write a NULL-terminated string. + * @return number of bytes written. + */ +int avio_put_str(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16LE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int avio_put_str16le(AVIOContext *s, const char *str); + +/** + * Convert an UTF-8 string to UTF-16BE and write it. + * @param s the AVIOContext + * @param str NULL-terminated UTF-8 string + * + * @return number of bytes written. + */ +int avio_put_str16be(AVIOContext *s, const char *str); + +/** + * Mark the written bytestream as a specific type. + * + * Zero-length ranges are omitted from the output. + * + * @param time the stream time the current bytestream pos corresponds to + * (in AV_TIME_BASE units), or AV_NOPTS_VALUE if unknown or not + * applicable + * @param type the kind of data written starting at the current pos + */ +void avio_write_marker(AVIOContext *s, int64_t time, enum AVIODataMarkerType type); + +/** + * ORing this as the "whence" parameter to a seek function causes it to + * return the filesize without seeking anywhere. 
Supporting this is optional. + * If it is not supported then the seek function will return <0. + */ +#define AVSEEK_SIZE 0x10000 + +/** + * Passing this flag as the "whence" parameter to a seek function causes it to + * seek by any means (like reopening and linear reading) or other normally unreasonable + * means that can be extremely slow. + * This may be ignored by the seek code. + */ +#define AVSEEK_FORCE 0x20000 + +/** + * fseek() equivalent for AVIOContext. + * @return new position or AVERROR. + */ +int64_t avio_seek(AVIOContext *s, int64_t offset, int whence); + +/** + * Skip given number of bytes forward + * @return new position or AVERROR. + */ +int64_t avio_skip(AVIOContext *s, int64_t offset); + +/** + * ftell() equivalent for AVIOContext. + * @return position or AVERROR. + */ +static av_always_inline int64_t avio_tell(AVIOContext *s) +{ + return avio_seek(s, 0, SEEK_CUR); +} + +/** + * Get the filesize. + * @return filesize or AVERROR + */ +int64_t avio_size(AVIOContext *s); + +/** + * feof() equivalent for AVIOContext. + * @return non zero if and only if end of file + */ +int avio_feof(AVIOContext *s); +#if FF_API_URL_FEOF +/** + * @deprecated use avio_feof() + */ +attribute_deprecated +int url_feof(AVIOContext *s); +#endif + +/** @warning Writes up to 4 KiB per call */ +int avio_printf(AVIOContext *s, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Force flushing of buffered data. + * + * For write streams, force the buffered data to be immediately written to the output, + * without to wait to fill the internal buffer. + * + * For read streams, discard all currently buffered data, and advance the + * reported file position to that of the underlying stream. This does not + * read new data, and does not perform any seeks. + */ +void avio_flush(AVIOContext *s); + +/** + * Read size bytes from AVIOContext into buf. 
+ * @return number of bytes read or AVERROR + */ +int avio_read(AVIOContext *s, unsigned char *buf, int size); + +/** + * @name Functions for reading from AVIOContext + * @{ + * + * @note return 0 if EOF, so you cannot use it if EOF handling is + * necessary + */ +int avio_r8 (AVIOContext *s); +unsigned int avio_rl16(AVIOContext *s); +unsigned int avio_rl24(AVIOContext *s); +unsigned int avio_rl32(AVIOContext *s); +uint64_t avio_rl64(AVIOContext *s); +unsigned int avio_rb16(AVIOContext *s); +unsigned int avio_rb24(AVIOContext *s); +unsigned int avio_rb32(AVIOContext *s); +uint64_t avio_rb64(AVIOContext *s); +/** + * @} + */ + +/** + * Read a string from pb into buf. The reading will terminate when either + * a NULL character was encountered, maxlen bytes have been read, or nothing + * more can be read from pb. The result is guaranteed to be NULL-terminated, it + * will be truncated if buf is too small. + * Note that the string is not interpreted or validated in any way, it + * might get truncated in the middle of a sequence for multi-byte encodings. + * + * @return number of bytes read (is always <= maxlen). + * If reading ends on EOF or error, the return value will be one more than + * bytes actually read. + */ +int avio_get_str(AVIOContext *pb, int maxlen, char *buf, int buflen); + +/** + * Read a UTF-16 string from pb and convert it to UTF-8. + * The reading will terminate when either a null or invalid character was + * encountered or maxlen bytes have been read. + * @return number of bytes read (is always <= maxlen) + */ +int avio_get_str16le(AVIOContext *pb, int maxlen, char *buf, int buflen); +int avio_get_str16be(AVIOContext *pb, int maxlen, char *buf, int buflen); + + +/** + * @name URL open modes + * The flags argument to avio_open must be one of the following + * constants, optionally ORed with other flags. 
+ * @{ + */ +#define AVIO_FLAG_READ 1 /**< read-only */ +#define AVIO_FLAG_WRITE 2 /**< write-only */ +#define AVIO_FLAG_READ_WRITE (AVIO_FLAG_READ|AVIO_FLAG_WRITE) /**< read-write pseudo flag */ +/** + * @} + */ + +/** + * Use non-blocking mode. + * If this flag is set, operations on the context will return + * AVERROR(EAGAIN) if they can not be performed immediately. + * If this flag is not set, operations on the context will never return + * AVERROR(EAGAIN). + * Note that this flag does not affect the opening/connecting of the + * context. Connecting a protocol will always block if necessary (e.g. on + * network protocols) but never hang (e.g. on busy devices). + * Warning: non-blocking protocols is work-in-progress; this flag may be + * silently ignored. + */ +#define AVIO_FLAG_NONBLOCK 8 + +/** + * Use direct mode. + * avio_read and avio_write should if possible be satisfied directly + * instead of going through a buffer, and avio_seek will always + * call the underlying seek function directly. + */ +#define AVIO_FLAG_DIRECT 0x8000 + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. + * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open(AVIOContext **s, const char *url, int flags); + +/** + * Create and initialize a AVIOContext for accessing the + * resource indicated by url. + * @note When the resource indicated by url has been opened in + * read+write mode, the AVIOContext can be used only for writing. 
+ * + * @param s Used to return the pointer to the created AVIOContext. + * In case of failure the pointed to value is set to NULL. + * @param url resource to access + * @param flags flags which control how the resource indicated by url + * is to be opened + * @param int_cb an interrupt callback to be used at the protocols level + * @param options A dictionary filled with protocol-private options. On return + * this parameter will be destroyed and replaced with a dict containing options + * that were not found. May be NULL. + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code in case of failure + */ +int avio_open2(AVIOContext **s, const char *url, int flags, + const AVIOInterruptCB *int_cb, AVDictionary **options); + +/** + * Close the resource accessed by the AVIOContext s and free it. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_closep + */ +int avio_close(AVIOContext *s); + +/** + * Close the resource accessed by the AVIOContext *s, free it + * and set the pointer pointing to it to NULL. + * This function can only be used if s was opened by avio_open(). + * + * The internal buffer is automatically flushed before closing the + * resource. + * + * @return 0 on success, an AVERROR < 0 on error. + * @see avio_close + */ +int avio_closep(AVIOContext **s); + + +/** + * Open a write only memory stream. + * + * @param s new IO context + * @return zero if no error. + */ +int avio_open_dyn_buf(AVIOContext **s); + +/** + * Return the written size and a pointer to the buffer. The buffer + * must be freed with av_free(). + * Padding of AV_INPUT_BUFFER_PADDING_SIZE is added to the buffer. 
+ * + * @param s IO context + * @param pbuffer pointer to a byte buffer + * @return the length of the byte buffer + */ +int avio_close_dyn_buf(AVIOContext *s, uint8_t **pbuffer); + +/** + * Iterate through names of available protocols. + * + * @param opaque A private pointer representing current protocol. + * It must be a pointer to NULL on first iteration and will + * be updated by successive calls to avio_enum_protocols. + * @param output If set to 1, iterate over output protocols, + * otherwise over input protocols. + * + * @return A static string containing the name of current protocol or NULL + */ +const char *avio_enum_protocols(void **opaque, int output); + +/** + * Pause and resume playing - only meaningful if using a network streaming + * protocol (e.g. MMS). + * + * @param h IO context from which to call the read_pause function pointer + * @param pause 1 for pause, 0 for resume + */ +int avio_pause(AVIOContext *h, int pause); + +/** + * Seek to a given timestamp relative to some component stream. + * Only meaningful if using a network streaming protocol (e.g. MMS.). + * + * @param h IO context from which to call the seek function pointers + * @param stream_index The stream index that the timestamp is relative to. + * If stream_index is (-1) the timestamp should be in AV_TIME_BASE + * units from the beginning of the presentation. + * If a stream_index >= 0 is used and the protocol does not support + * seeking based on component streams, the call will fail. + * @param timestamp timestamp in AVStream.time_base units + * or if there is no stream specified then in AV_TIME_BASE units. + * @param flags Optional combination of AVSEEK_FLAG_BACKWARD, AVSEEK_FLAG_BYTE + * and AVSEEK_FLAG_ANY. The protocol may silently ignore + * AVSEEK_FLAG_BACKWARD and AVSEEK_FLAG_ANY, but AVSEEK_FLAG_BYTE will + * fail if used and not supported. 
+ * @return >= 0 on success + * @see AVInputFormat::read_seek + */ +int64_t avio_seek_time(AVIOContext *h, int stream_index, + int64_t timestamp, int flags); + +/* Avoid a warning. The header can not be included because it breaks c++. */ +struct AVBPrint; + +/** + * Read contents of h into print buffer, up to max_size bytes, or up to EOF. + * + * @return 0 for success (max_size bytes read or EOF reached), negative error + * code otherwise + */ +int avio_read_to_bprint(AVIOContext *h, struct AVBPrint *pb, size_t max_size); + +/** + * Accept and allocate a client context on a server context. + * @param s the server context + * @param c the client context, must be unallocated + * @return >= 0 on success or a negative value corresponding + * to an AVERROR on failure + */ +int avio_accept(AVIOContext *s, AVIOContext **c); + +/** + * Perform one step of the protocol handshake to accept a new client. + * This function must be called on a client returned by avio_accept() before + * using it as a read/write context. + * It is separate from avio_accept() because it may block. + * A step of the handshake is defined by places where the application may + * decide to change the proceedings. + * For example, on a protocol with a request header and a reply header, each + * one can constitute a step because the application may use the parameters + * from the request to change parameters in the reply; or each individual + * chunk of the request can constitute a step. + * If the handshake is already finished, avio_handshake() does nothing and + * returns 0 immediately. 
+ * + * @param c the client context to perform the handshake on + * @return 0 on a complete and successful handshake + * > 0 if the handshake progressed, but is not complete + * < 0 for an AVERROR code + */ +int avio_handshake(AVIOContext *c); +#endif /* AVFORMAT_AVIO_H */ diff --git a/third-party/FFmpeg-iOS/include/libavformat/version.h b/third-party/FFmpeg-iOS/include/libavformat/version.h new file mode 100644 index 0000000000..645ce04720 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavformat/version.h @@ -0,0 +1,86 @@ +/* + * Version macros. + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVFORMAT_VERSION_H +#define AVFORMAT_VERSION_H + +/** + * @file + * @ingroup libavf + * Libavformat version macros + */ + +#include "../libavutil/version.h" + +// Major bumping may affect Ticket5467, 5421, 5451(compatibility with Chromium) +// Also please add any ticket numbers that you belive might be affected here +#define LIBAVFORMAT_VERSION_MAJOR 57 +#define LIBAVFORMAT_VERSION_MINOR 41 +#define LIBAVFORMAT_VERSION_MICRO 100 + +#define LIBAVFORMAT_VERSION_INT AV_VERSION_INT(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_VERSION AV_VERSION(LIBAVFORMAT_VERSION_MAJOR, \ + LIBAVFORMAT_VERSION_MINOR, \ + LIBAVFORMAT_VERSION_MICRO) +#define LIBAVFORMAT_BUILD LIBAVFORMAT_VERSION_INT + +#define LIBAVFORMAT_IDENT "Lavf" AV_STRINGIFY(LIBAVFORMAT_VERSION) + +/** + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. + * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. 
+ * + */ +#ifndef FF_API_LAVF_BITEXACT +#define FF_API_LAVF_BITEXACT (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_FRAC +#define FF_API_LAVF_FRAC (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_CODEC_TB +#define FF_API_LAVF_CODEC_TB (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_URL_FEOF +#define FF_API_URL_FEOF (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_FMT_RAWPICTURE +#define FF_API_LAVF_FMT_RAWPICTURE (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_COMPUTE_PKT_FIELDS2 +#define FF_API_COMPUTE_PKT_FIELDS2 (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_OLD_OPEN_CALLBACKS +#define FF_API_OLD_OPEN_CALLBACKS (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif +#ifndef FF_API_LAVF_AVCTX +#define FF_API_LAVF_AVCTX (LIBAVFORMAT_VERSION_MAJOR < 58) +#endif + +#ifndef FF_API_R_FRAME_RATE +#define FF_API_R_FRAME_RATE 1 +#endif +#endif /* AVFORMAT_VERSION_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/adler32.h b/third-party/FFmpeg-iOS/include/libavutil/adler32.h new file mode 100644 index 0000000000..0dc69ec0a8 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/adler32.h @@ -0,0 +1,55 @@ +/* + * copyright (c) 2006 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_ADLER32_H +#define AVUTIL_ADLER32_H + +#include +#include "attributes.h" + +/** + * @file + * Public header for libavutil Adler32 hasher + * + * @defgroup lavu_adler32 Adler32 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Calculate the Adler32 checksum of a buffer. + * + * Passing the return value to a subsequent av_adler32_update() call + * allows the checksum of multiple buffers to be calculated as though + * they were concatenated. + * + * @param adler initial checksum value + * @param buf pointer to input buffer + * @param len size of input buffer + * @return updated checksum + */ +unsigned long av_adler32_update(unsigned long adler, const uint8_t *buf, + unsigned int len) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_ADLER32_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/aes.h b/third-party/FFmpeg-iOS/include/libavutil/aes.h new file mode 100644 index 0000000000..09efbda107 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/aes.h @@ -0,0 +1,65 @@ +/* + * copyright (c) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_H +#define AVUTIL_AES_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_aes AES + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_aes_size; + +struct AVAES; + +/** + * Allocate an AVAES context. + */ +struct AVAES *av_aes_alloc(void); + +/** + * Initialize an AVAES context. + * @param key_bits 128, 192 or 256 + * @param decrypt 0 for encryption, 1 for decryption + */ +int av_aes_init(struct AVAES *a, const uint8_t *key, int key_bits, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * @param count number of 16 byte blocks + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_aes_crypt(struct AVAES *a, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_AES_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/aes_ctr.h b/third-party/FFmpeg-iOS/include/libavutil/aes_ctr.h new file mode 100644 index 0000000000..f596fa6a46 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/aes_ctr.h @@ -0,0 +1,83 @@ +/* + * AES-CTR cipher + * Copyright (c) 2015 Eran Kornblau + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AES_CTR_H +#define AVUTIL_AES_CTR_H + +#include + +#include "attributes.h" +#include "version.h" + +#define AES_CTR_KEY_SIZE (16) +#define AES_CTR_IV_SIZE (8) + +struct AVAESCTR; + +/** + * Allocate an AVAESCTR context. + */ +struct AVAESCTR *av_aes_ctr_alloc(void); + +/** + * Initialize an AVAESCTR context. + * @param key encryption key, must have a length of AES_CTR_KEY_SIZE + */ +int av_aes_ctr_init(struct AVAESCTR *a, const uint8_t *key); + +/** + * Release an AVAESCTR context. + */ +void av_aes_ctr_free(struct AVAESCTR *a); + +/** + * Process a buffer using a previously initialized context. 
+ * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param size the size of src and dst + */ +void av_aes_ctr_crypt(struct AVAESCTR *a, uint8_t *dst, const uint8_t *src, int size); + +/** + * Get the current iv + */ +const uint8_t* av_aes_ctr_get_iv(struct AVAESCTR *a); + +/** + * Generate a random iv + */ +void av_aes_ctr_set_random_iv(struct AVAESCTR *a); + +/** + * Forcefully change the iv + */ +void av_aes_ctr_set_iv(struct AVAESCTR *a, const uint8_t* iv); + +/** + * Increment the top 64 bit of the iv (performed after each frame) + */ +void av_aes_ctr_increment_iv(struct AVAESCTR *a); + +/** + * @} + */ + +#endif /* AVUTIL_AES_CTR_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/attributes.h b/third-party/FFmpeg-iOS/include/libavutil/attributes.h new file mode 100644 index 0000000000..5c6b9deecb --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/attributes.h @@ -0,0 +1,168 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Macro definitions for various function/variable attributes + */ + +#ifndef AVUTIL_ATTRIBUTES_H +#define AVUTIL_ATTRIBUTES_H + +#ifdef __GNUC__ +# define AV_GCC_VERSION_AT_LEAST(x,y) (__GNUC__ > (x) || __GNUC__ == (x) && __GNUC_MINOR__ >= (y)) +# define AV_GCC_VERSION_AT_MOST(x,y) (__GNUC__ < (x) || __GNUC__ == (x) && __GNUC_MINOR__ <= (y)) +#else +# define AV_GCC_VERSION_AT_LEAST(x,y) 0 +# define AV_GCC_VERSION_AT_MOST(x,y) 0 +#endif + +#ifndef av_always_inline +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_always_inline __attribute__((always_inline)) inline +#elif defined(_MSC_VER) +# define av_always_inline __forceinline +#else +# define av_always_inline inline +#endif +#endif + +#ifndef av_extern_inline +#if defined(__ICL) && __ICL >= 1210 || defined(__GNUC_STDC_INLINE__) +# define av_extern_inline extern inline +#else +# define av_extern_inline inline +#endif +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,4) +# define av_warn_unused_result __attribute__((warn_unused_result)) +#else +# define av_warn_unused_result +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_noinline __attribute__((noinline)) +#elif defined(_MSC_VER) +# define av_noinline __declspec(noinline) +#else +# define av_noinline +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_pure __attribute__((pure)) +#else +# define av_pure +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,6) +# define av_const __attribute__((const)) +#else +# define av_const +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) +# define av_cold __attribute__((cold)) +#else +# define av_cold +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,1) && !defined(__llvm__) +# define av_flatten __attribute__((flatten)) +#else +# define av_flatten +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define 
attribute_deprecated __attribute__((deprecated)) +#elif defined(_MSC_VER) +# define attribute_deprecated __declspec(deprecated) +#else +# define attribute_deprecated +#endif + +/** + * Disable warnings about deprecated features + * This is useful for sections of code kept for backward compatibility and + * scheduled for removal. + */ +#ifndef AV_NOWARN_DEPRECATED +#if AV_GCC_VERSION_AT_LEAST(4,6) +# define AV_NOWARN_DEPRECATED(code) \ + _Pragma("GCC diagnostic push") \ + _Pragma("GCC diagnostic ignored \"-Wdeprecated-declarations\"") \ + code \ + _Pragma("GCC diagnostic pop") +#elif defined(_MSC_VER) +# define AV_NOWARN_DEPRECATED(code) \ + __pragma(warning(push)) \ + __pragma(warning(disable : 4996)) \ + code; \ + __pragma(warning(pop)) +#else +# define AV_NOWARN_DEPRECATED(code) code +#endif +#endif + + +#if defined(__GNUC__) +# define av_unused __attribute__((unused)) +#else +# define av_unused +#endif + +/** + * Mark a variable as used and prevent the compiler from optimizing it + * away. This is useful for variables accessed only from inline + * assembler without the compiler being aware. 
+ */ +#if AV_GCC_VERSION_AT_LEAST(3,1) +# define av_used __attribute__((used)) +#else +# define av_used +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,3) +# define av_alias __attribute__((may_alias)) +#else +# define av_alias +#endif + +#if defined(__GNUC__) && !defined(__INTEL_COMPILER) && !defined(__clang__) +# define av_uninit(x) x=x +#else +# define av_uninit(x) x +#endif + +#ifdef __GNUC__ +# define av_builtin_constant_p __builtin_constant_p +# define av_printf_format(fmtpos, attrpos) __attribute__((__format__(__printf__, fmtpos, attrpos))) +#else +# define av_builtin_constant_p(x) 0 +# define av_printf_format(fmtpos, attrpos) +#endif + +#if AV_GCC_VERSION_AT_LEAST(2,5) +# define av_noreturn __attribute__((noreturn)) +#else +# define av_noreturn +#endif + +#endif /* AVUTIL_ATTRIBUTES_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/audio_fifo.h b/third-party/FFmpeg-iOS/include/libavutil/audio_fifo.h new file mode 100644 index 0000000000..d8a9194a8d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/audio_fifo.h @@ -0,0 +1,187 @@ +/* + * Audio FIFO + * Copyright (c) 2012 Justin Ruggles + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Audio FIFO Buffer + */ + +#ifndef AVUTIL_AUDIO_FIFO_H +#define AVUTIL_AUDIO_FIFO_H + +#include "avutil.h" +#include "fifo.h" +#include "samplefmt.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_audiofifo Audio FIFO Buffer + * @{ + */ + +/** + * Context for an Audio FIFO Buffer. + * + * - Operates at the sample level rather than the byte level. + * - Supports multiple channels with either planar or packed sample format. + * - Automatic reallocation when writing to a full buffer. + */ +typedef struct AVAudioFifo AVAudioFifo; + +/** + * Free an AVAudioFifo. + * + * @param af AVAudioFifo to free + */ +void av_audio_fifo_free(AVAudioFifo *af); + +/** + * Allocate an AVAudioFifo. + * + * @param sample_fmt sample format + * @param channels number of channels + * @param nb_samples initial allocation size, in samples + * @return newly allocated AVAudioFifo, or NULL on error + */ +AVAudioFifo *av_audio_fifo_alloc(enum AVSampleFormat sample_fmt, int channels, + int nb_samples); + +/** + * Reallocate an AVAudioFifo. + * + * @param af AVAudioFifo to reallocate + * @param nb_samples new allocation size, in samples + * @return 0 if OK, or negative AVERROR code on failure + */ +av_warn_unused_result +int av_audio_fifo_realloc(AVAudioFifo *af, int nb_samples); + +/** + * Write data to an AVAudioFifo. + * + * The AVAudioFifo will be reallocated automatically if the available space + * is less than nb_samples. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to write to + * @param data audio data plane pointers + * @param nb_samples number of samples to write + * @return number of samples actually written, or negative AVERROR + * code on failure. If successful, the number of samples + * actually written will always be nb_samples. + */ +int av_audio_fifo_write(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_peek(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Peek data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to peek + * @param offset offset from current read position + * @return number of samples actually peek, or negative AVERROR code + * on failure. The number of samples actually peek will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_peek_at(AVAudioFifo *af, void **data, int nb_samples, int offset); + +/** + * Read data from an AVAudioFifo. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param af AVAudioFifo to read from + * @param data audio data plane pointers + * @param nb_samples number of samples to read + * @return number of samples actually read, or negative AVERROR code + * on failure. The number of samples actually read will not + * be greater than nb_samples, and will only be less than + * nb_samples if av_audio_fifo_size is less than nb_samples. + */ +int av_audio_fifo_read(AVAudioFifo *af, void **data, int nb_samples); + +/** + * Drain data from an AVAudioFifo. + * + * Removes the data without reading it. + * + * @param af AVAudioFifo to drain + * @param nb_samples number of samples to drain + * @return 0 if OK, or negative AVERROR code on failure + */ +int av_audio_fifo_drain(AVAudioFifo *af, int nb_samples); + +/** + * Reset the AVAudioFifo buffer. + * + * This empties all data in the buffer. + * + * @param af AVAudioFifo to reset + */ +void av_audio_fifo_reset(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for reading. + * + * @param af the AVAudioFifo to query + * @return number of samples available for reading + */ +int av_audio_fifo_size(AVAudioFifo *af); + +/** + * Get the current number of samples in the AVAudioFifo available for writing. + * + * @param af the AVAudioFifo to query + * @return number of samples available for writing + */ +int av_audio_fifo_space(AVAudioFifo *af); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_AUDIO_FIFO_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/avassert.h b/third-party/FFmpeg-iOS/include/libavutil/avassert.h new file mode 100644 index 0000000000..f473637649 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/avassert.h @@ -0,0 +1,66 @@ +/* + * copyright (c) 2010 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple assert() macros that are a bit more flexible than ISO C assert(). + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_AVASSERT_H +#define AVUTIL_AVASSERT_H + +#include +#include "avutil.h" +#include "log.h" + +/** + * assert() equivalent, that is always enabled. + */ +#define av_assert0(cond) do { \ + if (!(cond)) { \ + av_log(NULL, AV_LOG_PANIC, "Assertion %s failed at %s:%d\n", \ + AV_STRINGIFY(cond), __FILE__, __LINE__); \ + abort(); \ + } \ +} while (0) + + +/** + * assert() equivalent, that does not lie in speed critical code. + * These asserts() thus can be enabled without fearing speed loss. + */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 0 +#define av_assert1(cond) av_assert0(cond) +#else +#define av_assert1(cond) ((void)0) +#endif + + +/** + * assert() equivalent, that does lie in speed critical code. 
+ */ +#if defined(ASSERT_LEVEL) && ASSERT_LEVEL > 1 +#define av_assert2(cond) av_assert0(cond) +#else +#define av_assert2(cond) ((void)0) +#endif + +#endif /* AVUTIL_AVASSERT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/avconfig.h b/third-party/FFmpeg-iOS/include/libavutil/avconfig.h new file mode 100644 index 0000000000..36a8cd14da --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/avconfig.h @@ -0,0 +1,7 @@ +/* Generated by ffconf */ +#ifndef AVUTIL_AVCONFIG_H +#define AVUTIL_AVCONFIG_H +#define AV_HAVE_BIGENDIAN 0 +#define AV_HAVE_FAST_UNALIGNED 1 +#define AV_HAVE_INCOMPATIBLE_LIBAV_ABI 0 +#endif /* AVUTIL_AVCONFIG_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/avstring.h b/third-party/FFmpeg-iOS/include/libavutil/avstring.h new file mode 100644 index 0000000000..dd2876990f --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/avstring.h @@ -0,0 +1,402 @@ +/* + * Copyright (c) 2007 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVSTRING_H +#define AVUTIL_AVSTRING_H + +#include +#include +#include "attributes.h" + +/** + * @addtogroup lavu_string + * @{ + */ + +/** + * Return non-zero if pfx is a prefix of str. 
If it is, *ptr is set to + * the address of the first character in str after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_strstart(const char *str, const char *pfx, const char **ptr); + +/** + * Return non-zero if pfx is a prefix of str independent of case. If + * it is, *ptr is set to the address of the first character in str + * after the prefix. + * + * @param str input string + * @param pfx prefix to test + * @param ptr updated if the prefix is matched inside str + * @return non-zero if the prefix matches, zero otherwise + */ +int av_stristart(const char *str, const char *pfx, const char **ptr); + +/** + * Locate the first case-independent occurrence in the string haystack + * of the string needle. A zero-length string needle is considered to + * match at the start of haystack. + * + * This function is a case-insensitive version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_stristr(const char *haystack, const char *needle); + +/** + * Locate the first occurrence of the string needle in the string haystack + * where not more than hay_length characters are searched. A zero-length + * string needle is considered to match at the start of haystack. + * + * This function is a length-limited version of the standard strstr(). + * + * @param haystack string to search in + * @param needle string to search for + * @param hay_length length of string to search in + * @return pointer to the located match within haystack + * or a null pointer if no match + */ +char *av_strnstr(const char *haystack, const char *needle, size_t hay_length); + +/** + * Copy the string src to dst, but no more than size - 1 bytes, and + * null-terminate dst. 
+ * + * This function is the same as BSD strlcpy(). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the length of src + * + * @warning since the return value is the length of src, src absolutely + * _must_ be a properly 0-terminated string, otherwise this will read beyond + * the end of the buffer and possibly crash. + */ +size_t av_strlcpy(char *dst, const char *src, size_t size); + +/** + * Append the string src to the string dst, but to a total length of + * no more than size - 1 bytes, and null-terminate dst. + * + * This function is similar to BSD strlcat(), but differs when + * size <= strlen(dst). + * + * @param dst destination buffer + * @param src source string + * @param size size of destination buffer + * @return the total length of src and dst + * + * @warning since the return value use the length of src and dst, these + * absolutely _must_ be a properly 0-terminated strings, otherwise this + * will read beyond the end of the buffer and possibly crash. + */ +size_t av_strlcat(char *dst, const char *src, size_t size); + +/** + * Append output to a string, according to a format. Never write out of + * the destination buffer, and always put a terminating 0 within + * the buffer. + * @param dst destination buffer (string to which the output is + * appended) + * @param size total size of the destination buffer + * @param fmt printf-compatible format string, specifying how the + * following parameters are used + * @return the length of the string that would have been generated + * if enough space had been available + */ +size_t av_strlcatf(char *dst, size_t size, const char *fmt, ...) av_printf_format(3, 4); + +/** + * Get the count of continuous non zero chars starting from the beginning. 
+ * + * @param len maximum number of characters to check in the string, that + * is the maximum value which is returned by the function + */ +static inline size_t av_strnlen(const char *s, size_t len) +{ + size_t i; + for (i = 0; i < len && s[i]; i++) + ; + return i; +} + +/** + * Print arguments following specified format into a large enough auto + * allocated buffer. It is similar to GNU asprintf(). + * @param fmt printf-compatible format string, specifying how the + * following parameters are used. + * @return the allocated string + * @note You have to free the string yourself with av_free(). + */ +char *av_asprintf(const char *fmt, ...) av_printf_format(1, 2); + +/** + * Convert a number to an av_malloced string. + */ +char *av_d2str(double d); + +/** + * Unescape the given string until a non escaped terminating char, + * and return the token corresponding to the unescaped string. + * + * The normal \ and ' escaping is supported. Leading and trailing + * whitespaces are removed, unless they are escaped with '\' or are + * enclosed between ''. + * + * @param buf the buffer to parse, buf will be updated to point to the + * terminating char + * @param term a 0-terminated list of terminating chars + * @return the malloced unescaped string, which must be av_freed by + * the user, NULL in case of allocation failure + */ +char *av_get_token(const char **buf, const char *term); + +/** + * Split the string into several tokens which can be accessed by + * successive calls to av_strtok(). + * + * A token is defined as a sequence of characters not belonging to the + * set specified in delim. + * + * On the first call to av_strtok(), s should point to the string to + * parse, and the value of saveptr is ignored. In subsequent calls, s + * should be NULL, and saveptr should be unchanged since the previous + * call. + * + * This function is similar to strtok_r() defined in POSIX.1. 
+ * + * @param s the string to parse, may be NULL + * @param delim 0-terminated list of token delimiters, must be non-NULL + * @param saveptr user-provided pointer which points to stored + * information necessary for av_strtok() to continue scanning the same + * string. saveptr is updated to point to the next character after the + * first delimiter found, or to NULL if the string was terminated + * @return the found token, or NULL when no token is found + */ +char *av_strtok(char *s, const char *delim, char **saveptr); + +/** + * Locale-independent conversion of ASCII isdigit. + */ +static inline av_const int av_isdigit(int c) +{ + return c >= '0' && c <= '9'; +} + +/** + * Locale-independent conversion of ASCII isgraph. + */ +static inline av_const int av_isgraph(int c) +{ + return c > 32 && c < 127; +} + +/** + * Locale-independent conversion of ASCII isspace. + */ +static inline av_const int av_isspace(int c) +{ + return c == ' ' || c == '\f' || c == '\n' || c == '\r' || c == '\t' || + c == '\v'; +} + +/** + * Locale-independent conversion of ASCII characters to uppercase. + */ +static inline av_const int av_toupper(int c) +{ + if (c >= 'a' && c <= 'z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII characters to lowercase. + */ +static inline av_const int av_tolower(int c) +{ + if (c >= 'A' && c <= 'Z') + c ^= 0x20; + return c; +} + +/** + * Locale-independent conversion of ASCII isxdigit. + */ +static inline av_const int av_isxdigit(int c) +{ + c = av_tolower(c); + return av_isdigit(c) || (c >= 'a' && c <= 'f'); +} + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strcasecmp(const char *a, const char *b); + +/** + * Locale-independent case-insensitive compare. + * @note This means only ASCII-range characters are case-insensitive + */ +int av_strncasecmp(const char *a, const char *b, size_t n); + + +/** + * Thread safe basename. 
+ * @param path the path, on DOS both \ and / are considered separators. + * @return pointer to the basename substring. + */ +const char *av_basename(const char *path); + +/** + * Thread safe dirname. + * @param path the path, on DOS both \ and / are considered separators. + * @return the path with the separator replaced by the string terminator or ".". + * @note the function may change the input string. + */ +const char *av_dirname(char *path); + +/** + * Match instances of a name in a comma-separated list of names. + * List entries are checked from the start to the end of the names list, + * the first match ends further processing. If an entry prefixed with '-' + * matches, then 0 is returned. The "ALL" list entry is considered to + * match all names. + * + * @param name Name to look for. + * @param names List of names. + * @return 1 on match, 0 otherwise. + */ +int av_match_name(const char *name, const char *names); + +/** + * Append path component to the existing path. + * Path separator '/' is placed between when needed. + * Resulting string have to be freed with av_free(). + * @param path base path + * @param component component to be appended + * @return new path or NULL on error. + */ +char *av_append_path_component(const char *path, const char *component); + +enum AVEscapeMode { + AV_ESCAPE_MODE_AUTO, ///< Use auto-selected escaping mode. + AV_ESCAPE_MODE_BACKSLASH, ///< Use backslash escaping. + AV_ESCAPE_MODE_QUOTE, ///< Use single-quote escaping. +}; + +/** + * Consider spaces special and escape them even in the middle of the + * string. + * + * This is equivalent to adding the whitespace characters to the special + * characters lists, except it is guaranteed to use the exact same list + * of whitespace characters as the rest of libavutil. + */ +#define AV_ESCAPE_FLAG_WHITESPACE (1 << 0) + +/** + * Escape only specified special characters. 
+ * Without this flag, escape also any characters that may be considered + * special by av_get_token(), such as the single quote. + */ +#define AV_ESCAPE_FLAG_STRICT (1 << 1) + +/** + * Escape string in src, and put the escaped string in an allocated + * string in *dst, which must be freed with av_free(). + * + * @param dst pointer where an allocated string is put + * @param src string to escape, must be non-NULL + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_ macros + * @return the length of the allocated string, or a negative error code in case of error + * @see av_bprint_escape() + */ +av_warn_unused_result +int av_escape(char **dst, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#define AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES 1 ///< accept codepoints over 0x10FFFF +#define AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS 2 ///< accept non-characters - 0xFFFE and 0xFFFF +#define AV_UTF8_FLAG_ACCEPT_SURROGATES 4 ///< accept UTF-16 surrogates codes +#define AV_UTF8_FLAG_EXCLUDE_XML_INVALID_CONTROL_CODES 8 ///< exclude control codes not accepted by XML + +#define AV_UTF8_FLAG_ACCEPT_ALL \ + AV_UTF8_FLAG_ACCEPT_INVALID_BIG_CODES|AV_UTF8_FLAG_ACCEPT_NON_CHARACTERS|AV_UTF8_FLAG_ACCEPT_SURROGATES + +/** + * Read and decode a single UTF-8 code point (character) from the + * buffer in *buf, and update *buf to point to the next byte to + * decode. + * + * In case of an invalid byte sequence, the pointer will be updated to + * the next byte after the invalid sequence and the function will + * return an error code. 
+ * + * Depending on the specified flags, the function will also fail in + * case the decoded code point does not belong to a valid range. + * + * @note For speed-relevant code a carefully implemented use of + * GET_UTF8() may be preferred. + * + * @param codep pointer used to return the parsed code in case of success. + * The value in *codep is set even in case the range check fails. + * @param bufp pointer to the address the first byte of the sequence + * to decode, updated by the function to point to the + * byte next after the decoded sequence + * @param buf_end pointer to the end of the buffer, points to the next + * byte past the last in the buffer. This is used to + * avoid buffer overreads (in case of an unfinished + * UTF-8 sequence towards the end of the buffer). + * @param flags a collection of AV_UTF8_FLAG_* flags + * @return >= 0 in case a sequence was successfully read, a negative + * value in case of invalid sequence + */ +av_warn_unused_result +int av_utf8_decode(int32_t *codep, const uint8_t **bufp, const uint8_t *buf_end, + unsigned int flags); + +/** + * Check if a name is in a list. + * @returns 0 if not found, or the 1 based index where it has been found in the + * list. + */ +int av_match_list(const char *name, const char *list, char separator); + +/** + * @} + */ + +#endif /* AVUTIL_AVSTRING_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/avutil.h b/third-party/FFmpeg-iOS/include/libavutil/avutil.h new file mode 100644 index 0000000000..c1f76c16ea --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/avutil.h @@ -0,0 +1,343 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_AVUTIL_H +#define AVUTIL_AVUTIL_H + +/** + * @file + * external API header + */ + +/** + * @mainpage + * + * @section ffmpeg_intro Introduction + * + * This document describes the usage of the different libraries + * provided by FFmpeg. + * + * @li @ref libavc "libavcodec" encoding/decoding library + * @li @ref lavfi "libavfilter" graph-based frame editing library + * @li @ref libavf "libavformat" I/O and muxing/demuxing library + * @li @ref lavd "libavdevice" special devices muxing/demuxing library + * @li @ref lavu "libavutil" common utility library + * @li @ref lswr "libswresample" audio resampling, format conversion and mixing + * @li @ref lpp "libpostproc" post processing library + * @li @ref libsws "libswscale" color conversion and scaling library + * + * @section ffmpeg_versioning Versioning and compatibility + * + * Each of the FFmpeg libraries contains a version.h header, which defines a + * major, minor and micro version number with the + * LIBRARYNAME_VERSION_{MAJOR,MINOR,MICRO} macros. The major version + * number is incremented with backward incompatible changes - e.g. removing + * parts of the public API, reordering public struct members, etc. The minor + * version number is incremented for backward compatible API changes or major + * new features - e.g. adding a new public function or a new decoder. The micro + * version number is incremented for smaller changes that a calling program + * might still want to check for - e.g. 
changing behavior in a previously + * unspecified situation. + * + * FFmpeg guarantees backward API and ABI compatibility for each library as long + * as its major version number is unchanged. This means that no public symbols + * will be removed or renamed. Types and names of the public struct members and + * values of public macros and enums will remain the same (unless they were + * explicitly declared as not part of the public API). Documented behavior will + * not change. + * + * In other words, any correct program that works with a given FFmpeg snapshot + * should work just as well without any changes with any later snapshot with the + * same major versions. This applies to both rebuilding the program against new + * FFmpeg versions or to replacing the dynamic FFmpeg libraries that a program + * links against. + * + * However, new public symbols may be added and new members may be appended to + * public structs whose size is not part of public ABI (most public structs in + * FFmpeg). New macros and enum values may be added. Behavior in undocumented + * situations may change slightly (and be documented). All those are accompanied + * by an entry in doc/APIchanges and incrementing either the minor or micro + * version number. + */ + +/** + * @defgroup lavu Common utility functions + * + * @brief + * libavutil contains the code shared across all the other FFmpeg + * libraries + * + * @note In order to use the functions provided by avutil you must include + * the specific header. 
+ * + * @{ + * + * @defgroup lavu_crypto Crypto and Hashing + * + * @{ + * @} + * + * @defgroup lavu_math Maths + * @{ + * + * @} + * + * @defgroup lavu_string String Manipulation + * + * @{ + * + * @} + * + * @defgroup lavu_mem Memory Management + * + * @{ + * + * @} + * + * @defgroup lavu_data Data Structures + * @{ + * + * @} + * + * @defgroup lavu_audio Audio related + * + * @{ + * + * @} + * + * @defgroup lavu_error Error Codes + * + * @{ + * + * @} + * + * @defgroup lavu_log Logging Facility + * + * @{ + * + * @} + * + * @defgroup lavu_misc Other + * + * @{ + * + * @defgroup preproc_misc Preprocessor String Macros + * + * @{ + * + * @} + * + * @defgroup version_utils Library Version Macros + * + * @{ + * + * @} + */ + + +/** + * @addtogroup lavu_ver + * @{ + */ + +/** + * Return the LIBAVUTIL_VERSION_INT constant. + */ +unsigned avutil_version(void); + +/** + * Return an informative version string. This usually is the actual release + * version number or a git commit description. This string has no fixed format + * and can change any time. It should never be parsed by code. + */ +const char *av_version_info(void); + +/** + * Return the libavutil build-time configuration. + */ +const char *avutil_configuration(void); + +/** + * Return the libavutil license. + */ +const char *avutil_license(void); + +/** + * @} + */ + +/** + * @addtogroup lavu_media Media Type + * @brief Media Type + */ + +enum AVMediaType { + AVMEDIA_TYPE_UNKNOWN = -1, ///< Usually treated as AVMEDIA_TYPE_DATA + AVMEDIA_TYPE_VIDEO, + AVMEDIA_TYPE_AUDIO, + AVMEDIA_TYPE_DATA, ///< Opaque data information usually continuous + AVMEDIA_TYPE_SUBTITLE, + AVMEDIA_TYPE_ATTACHMENT, ///< Opaque data information usually sparse + AVMEDIA_TYPE_NB +}; + +/** + * Return a string describing the media_type enum, NULL if media_type + * is unknown. 
+ */ +const char *av_get_media_type_string(enum AVMediaType media_type); + +/** + * @defgroup lavu_const Constants + * @{ + * + * @defgroup lavu_enc Encoding specific + * + * @note those definition should move to avcodec + * @{ + */ + +#define FF_LAMBDA_SHIFT 7 +#define FF_LAMBDA_SCALE (1< + +/** + * @defgroup lavu_base64 Base64 + * @ingroup lavu_crypto + * @{ + */ + +/** + * Decode a base64-encoded string. + * + * @param out buffer for decoded data + * @param in null-terminated input string + * @param out_size size in bytes of the out buffer, must be at + * least 3/4 of the length of in, that is AV_BASE64_DECODE_SIZE(strlen(in)) + * @return number of bytes written, or a negative value in case of + * invalid input + */ +int av_base64_decode(uint8_t *out, const char *in, int out_size); + +/** + * Calculate the output size in bytes needed to decode a base64 string + * with length x to a data buffer. + */ +#define AV_BASE64_DECODE_SIZE(x) ((x) * 3LL / 4) + +/** + * Encode data to base64 and null-terminate. + * + * @param out buffer for encoded data + * @param out_size size in bytes of the out buffer (including the + * null terminator), must be at least AV_BASE64_SIZE(in_size) + * @param in input buffer containing the data to encode + * @param in_size size in bytes of the in buffer + * @return out or NULL in case of error + */ +char *av_base64_encode(char *out, int out_size, const uint8_t *in, int in_size); + +/** + * Calculate the output size needed to base64-encode x bytes to a + * null-terminated string. + */ +#define AV_BASE64_SIZE(x) (((x)+2) / 3 * 4 + 1) + + /** + * @} + */ + +#endif /* AVUTIL_BASE64_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/blowfish.h b/third-party/FFmpeg-iOS/include/libavutil/blowfish.h new file mode 100644 index 0000000000..9e289a40da --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/blowfish.h @@ -0,0 +1,82 @@ +/* + * Blowfish algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BLOWFISH_H +#define AVUTIL_BLOWFISH_H + +#include + +/** + * @defgroup lavu_blowfish Blowfish + * @ingroup lavu_crypto + * @{ + */ + +#define AV_BF_ROUNDS 16 + +typedef struct AVBlowfish { + uint32_t p[AV_BF_ROUNDS + 2]; + uint32_t s[4][256]; +} AVBlowfish; + +/** + * Allocate an AVBlowfish context. + */ +AVBlowfish *av_blowfish_alloc(void); + +/** + * Initialize an AVBlowfish context. + * + * @param ctx an AVBlowfish context + * @param key a key + * @param key_len length of the key + */ +void av_blowfish_init(struct AVBlowfish *ctx, const uint8_t *key, int key_len); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. + * + * @param ctx an AVBlowfish context + * @param xl left four bytes halves of input to be encrypted + * @param xr right four bytes halves of input to be encrypted + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt_ecb(struct AVBlowfish *ctx, uint32_t *xl, uint32_t *xr, + int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. 
+ * + * @param ctx an AVBlowfish context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_blowfish_crypt(struct AVBlowfish *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_BLOWFISH_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/bprint.h b/third-party/FFmpeg-iOS/include/libavutil/bprint.h new file mode 100644 index 0000000000..c09b1ac1e1 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/bprint.h @@ -0,0 +1,219 @@ +/* + * Copyright (c) 2012 Nicolas George + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_BPRINT_H +#define AVUTIL_BPRINT_H + +#include + +#include "attributes.h" +#include "avstring.h" + +/** + * Define a structure with extra padding to a fixed size + * This helps ensuring binary compatibility with future versions. + */ + +#define FF_PAD_STRUCTURE(name, size, ...) 
\ +struct ff_pad_helper_##name { __VA_ARGS__ }; \ +typedef struct name { \ + __VA_ARGS__ \ + char reserved_padding[size - sizeof(struct ff_pad_helper_##name)]; \ +} name; + +/** + * Buffer to print data progressively + * + * The string buffer grows as necessary and is always 0-terminated. + * The content of the string is never accessed, and thus is + * encoding-agnostic and can even hold binary data. + * + * Small buffers are kept in the structure itself, and thus require no + * memory allocation at all (unless the contents of the buffer is needed + * after the structure goes out of scope). This is almost as lightweight as + * declaring a local "char buf[512]". + * + * The length of the string can go beyond the allocated size: the buffer is + * then truncated, but the functions still keep account of the actual total + * length. + * + * In other words, buf->len can be greater than buf->size and records the + * total length of what would have been to the buffer if there had been + * enough memory. + * + * Append operations do not need to be tested for failure: if a memory + * allocation fails, data stop being appended to the buffer, but the length + * is still updated. This situation can be tested with + * av_bprint_is_complete(). + * + * The size_max field determines several possible behaviours: + * + * size_max = -1 (= UINT_MAX) or any large value will let the buffer be + * reallocated as necessary, with an amortized linear cost. + * + * size_max = 0 prevents writing anything to the buffer: only the total + * length is computed. The write operations can then possibly be repeated in + * a buffer with exactly the necessary size + * (using size_init = size_max = len + 1). + * + * size_max = 1 is automatically replaced by the exact size available in the + * structure itself, thus ensuring no dynamic memory allocation. The + * internal buffer is large enough to hold a reasonable paragraph of text, + * such as the current paragraph. 
+ */ + +FF_PAD_STRUCTURE(AVBPrint, 1024, + char *str; /**< string so far */ + unsigned len; /**< length so far */ + unsigned size; /**< allocated memory */ + unsigned size_max; /**< maximum allocated memory */ + char reserved_internal_buffer[1]; +) + +/** + * Convenience macros for special values for av_bprint_init() size_max + * parameter. + */ +#define AV_BPRINT_SIZE_UNLIMITED ((unsigned)-1) +#define AV_BPRINT_SIZE_AUTOMATIC 1 +#define AV_BPRINT_SIZE_COUNT_ONLY 0 + +/** + * Init a print buffer. + * + * @param buf buffer to init + * @param size_init initial size (including the final 0) + * @param size_max maximum size; + * 0 means do not write anything, just count the length; + * 1 is replaced by the maximum value for automatic storage; + * any large value means that the internal buffer will be + * reallocated as needed up to that limit; -1 is converted to + * UINT_MAX, the largest limit possible. + * Check also AV_BPRINT_SIZE_* macros. + */ +void av_bprint_init(AVBPrint *buf, unsigned size_init, unsigned size_max); + +/** + * Init a print buffer using a pre-existing buffer. + * + * The buffer will not be reallocated. + * + * @param buf buffer structure to init + * @param buffer byte buffer to use for the string data + * @param size size of buffer + */ +void av_bprint_init_for_buffer(AVBPrint *buf, char *buffer, unsigned size); + +/** + * Append a formatted string to a print buffer. + */ +void av_bprintf(AVBPrint *buf, const char *fmt, ...) av_printf_format(2, 3); + +/** + * Append a formatted string to a print buffer. + */ +void av_vbprintf(AVBPrint *buf, const char *fmt, va_list vl_arg); + +/** + * Append char c n times to a print buffer. + */ +void av_bprint_chars(AVBPrint *buf, char c, unsigned n); + +/** + * Append data to a print buffer. 
+ * + * param buf bprint buffer to use + * param data pointer to data + * param size size of data + */ +void av_bprint_append_data(AVBPrint *buf, const char *data, unsigned size); + +struct tm; +/** + * Append a formatted date and time to a print buffer. + * + * param buf bprint buffer to use + * param fmt date and time format string, see strftime() + * param tm broken-down time structure to translate + * + * @note due to poor design of the standard strftime function, it may + * produce poor results if the format string expands to a very long text and + * the bprint buffer is near the limit stated by the size_max option. + */ +void av_bprint_strftime(AVBPrint *buf, const char *fmt, const struct tm *tm); + +/** + * Allocate bytes in the buffer for external use. + * + * @param[in] buf buffer structure + * @param[in] size required size + * @param[out] mem pointer to the memory area + * @param[out] actual_size size of the memory area after allocation; + * can be larger or smaller than size + */ +void av_bprint_get_buffer(AVBPrint *buf, unsigned size, + unsigned char **mem, unsigned *actual_size); + +/** + * Reset the string to "" but keep internal allocated data. + */ +void av_bprint_clear(AVBPrint *buf); + +/** + * Test if the print buffer is complete (not truncated). + * + * It may have been truncated due to a memory allocation failure + * or the size_max limit (compare size and size_max if necessary). + */ +static inline int av_bprint_is_complete(const AVBPrint *buf) +{ + return buf->len < buf->size; +} + +/** + * Finalize a print buffer. + * + * The print buffer can no longer be used afterwards, + * but the len and size fields are still valid. 
+ * + * @arg[out] ret_str if not NULL, used to return a permanent copy of the + * buffer contents, or NULL if memory allocation fails; + * if NULL, the buffer is discarded and freed + * @return 0 for success or error code (probably AVERROR(ENOMEM)) + */ +int av_bprint_finalize(AVBPrint *buf, char **ret_str); + +/** + * Escape the content in src and append it to dstbuf. + * + * @param dstbuf already inited destination bprint buffer + * @param src string containing the text to escape + * @param special_chars string containing the special characters which + * need to be escaped, can be NULL + * @param mode escape mode to employ, see AV_ESCAPE_MODE_* macros. + * Any unknown value for mode will be considered equivalent to + * AV_ESCAPE_MODE_BACKSLASH, but this behaviour can change without + * notice. + * @param flags flags which control how to escape, see AV_ESCAPE_FLAG_* macros + */ +void av_bprint_escape(AVBPrint *dstbuf, const char *src, const char *special_chars, + enum AVEscapeMode mode, int flags); + +#endif /* AVUTIL_BPRINT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/bswap.h b/third-party/FFmpeg-iOS/include/libavutil/bswap.h new file mode 100644 index 0000000000..a21a03483f --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/bswap.h @@ -0,0 +1,109 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * byte swapping routines + */ + +#ifndef AVUTIL_BSWAP_H +#define AVUTIL_BSWAP_H + +#include +#include "../libavutilavconfig.h" +#include "attributes.h" + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_AARCH64 +# include "aarch64/bswap.h" +#elif ARCH_ARM +# include "arm/bswap.h" +#elif ARCH_AVR32 +# include "avr32/bswap.h" +#elif ARCH_SH4 +# include "sh4/bswap.h" +#elif ARCH_X86 +# include "x86/bswap.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +#define AV_BSWAP16C(x) (((x) << 8 & 0xff00) | ((x) >> 8 & 0x00ff)) +#define AV_BSWAP32C(x) (AV_BSWAP16C(x) << 16 | AV_BSWAP16C((x) >> 16)) +#define AV_BSWAP64C(x) (AV_BSWAP32C(x) << 32 | AV_BSWAP32C((x) >> 32)) + +#define AV_BSWAPC(s, x) AV_BSWAP##s##C(x) + +#ifndef av_bswap16 +static av_always_inline av_const uint16_t av_bswap16(uint16_t x) +{ + x= (x>>8) | (x<<8); + return x; +} +#endif + +#ifndef av_bswap32 +static av_always_inline av_const uint32_t av_bswap32(uint32_t x) +{ + return AV_BSWAP32C(x); +} +#endif + +#ifndef av_bswap64 +static inline uint64_t av_const av_bswap64(uint64_t x) +{ + return (uint64_t)av_bswap32(x) << 32 | av_bswap32(x >> 32); +} +#endif + +// be2ne ... big-endian to native-endian +// le2ne ... 
little-endian to native-endian + +#if AV_HAVE_BIGENDIAN +#define av_be2ne16(x) (x) +#define av_be2ne32(x) (x) +#define av_be2ne64(x) (x) +#define av_le2ne16(x) av_bswap16(x) +#define av_le2ne32(x) av_bswap32(x) +#define av_le2ne64(x) av_bswap64(x) +#define AV_BE2NEC(s, x) (x) +#define AV_LE2NEC(s, x) AV_BSWAPC(s, x) +#else +#define av_be2ne16(x) av_bswap16(x) +#define av_be2ne32(x) av_bswap32(x) +#define av_be2ne64(x) av_bswap64(x) +#define av_le2ne16(x) (x) +#define av_le2ne32(x) (x) +#define av_le2ne64(x) (x) +#define AV_BE2NEC(s, x) AV_BSWAPC(s, x) +#define AV_LE2NEC(s, x) (x) +#endif + +#define AV_BE2NE16C(x) AV_BE2NEC(16, x) +#define AV_BE2NE32C(x) AV_BE2NEC(32, x) +#define AV_BE2NE64C(x) AV_BE2NEC(64, x) +#define AV_LE2NE16C(x) AV_LE2NEC(16, x) +#define AV_LE2NE32C(x) AV_LE2NEC(32, x) +#define AV_LE2NE64C(x) AV_LE2NEC(64, x) + +#endif /* AVUTIL_BSWAP_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/buffer.h b/third-party/FFmpeg-iOS/include/libavutil/buffer.h new file mode 100644 index 0000000000..0c0ce12cf2 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/buffer.h @@ -0,0 +1,290 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_buffer + * refcounted data buffer API + */ + +#ifndef AVUTIL_BUFFER_H +#define AVUTIL_BUFFER_H + +#include + +/** + * @defgroup lavu_buffer AVBuffer + * @ingroup lavu_data + * + * @{ + * AVBuffer is an API for reference-counted data buffers. + * + * There are two core objects in this API -- AVBuffer and AVBufferRef. AVBuffer + * represents the data buffer itself; it is opaque and not meant to be accessed + * by the caller directly, but only through AVBufferRef. However, the caller may + * e.g. compare two AVBuffer pointers to check whether two different references + * are describing the same data buffer. AVBufferRef represents a single + * reference to an AVBuffer and it is the object that may be manipulated by the + * caller directly. + * + * There are two functions provided for creating a new AVBuffer with a single + * reference -- av_buffer_alloc() to just allocate a new buffer, and + * av_buffer_create() to wrap an existing array in an AVBuffer. From an existing + * reference, additional references may be created with av_buffer_ref(). + * Use av_buffer_unref() to free a reference (this will automatically free the + * data once all the references are freed). + * + * The convention throughout this API and the rest of FFmpeg is such that the + * buffer is considered writable if there exists only one reference to it (and + * it has not been marked as read-only). The av_buffer_is_writable() function is + * provided to check whether this is true and av_buffer_make_writable() will + * automatically create a new writable buffer when necessary. + * Of course nothing prevents the calling code from violating this convention, + * however that is safe only when all the existing references are under its + * control. 
+ * + * @note Referencing and unreferencing the buffers is thread-safe and thus + * may be done from multiple threads simultaneously without any need for + * additional locking. + * + * @note Two different references to the same buffer can point to different + * parts of the buffer (i.e. their AVBufferRef.data will not be equal). + */ + +/** + * A reference counted buffer type. It is opaque and is meant to be used through + * references (AVBufferRef). + */ +typedef struct AVBuffer AVBuffer; + +/** + * A reference to a data buffer. + * + * The size of this struct is not a part of the public ABI and it is not meant + * to be allocated directly. + */ +typedef struct AVBufferRef { + AVBuffer *buffer; + + /** + * The data buffer. It is considered writable if and only if + * this is the only reference to the buffer, in which case + * av_buffer_is_writable() returns 1. + */ + uint8_t *data; + /** + * Size of data in bytes. + */ + int size; +} AVBufferRef; + +/** + * Allocate an AVBuffer of the given size using av_malloc(). + * + * @return an AVBufferRef of given size or NULL when out of memory + */ +AVBufferRef *av_buffer_alloc(int size); + +/** + * Same as av_buffer_alloc(), except the returned buffer will be initialized + * to zero. + */ +AVBufferRef *av_buffer_allocz(int size); + +/** + * Always treat the buffer as read-only, even when it has only one + * reference. + */ +#define AV_BUFFER_FLAG_READONLY (1 << 0) + +/** + * Create an AVBuffer from an existing array. + * + * If this function is successful, data is owned by the AVBuffer. The caller may + * only access data through the returned AVBufferRef and references derived from + * it. + * If this function fails, data is left untouched. 
+ * @param data data array + * @param size size of data in bytes + * @param free a callback for freeing this buffer's data + * @param opaque parameter to be got for processing or passed to free + * @param flags a combination of AV_BUFFER_FLAG_* + * + * @return an AVBufferRef referring to data on success, NULL on failure. + */ +AVBufferRef *av_buffer_create(uint8_t *data, int size, + void (*free)(void *opaque, uint8_t *data), + void *opaque, int flags); + +/** + * Default free callback, which calls av_free() on the buffer data. + * This function is meant to be passed to av_buffer_create(), not called + * directly. + */ +void av_buffer_default_free(void *opaque, uint8_t *data); + +/** + * Create a new reference to an AVBuffer. + * + * @return a new AVBufferRef referring to the same AVBuffer as buf or NULL on + * failure. + */ +AVBufferRef *av_buffer_ref(AVBufferRef *buf); + +/** + * Free a given reference and automatically free the buffer if there are no more + * references to it. + * + * @param buf the reference to be freed. The pointer is set to NULL on return. + */ +void av_buffer_unref(AVBufferRef **buf); + +/** + * @return 1 if the caller may write to the data referred to by buf (which is + * true if and only if buf is the only reference to the underlying AVBuffer). + * Return 0 otherwise. + * A positive answer is valid until av_buffer_ref() is called on buf. + */ +int av_buffer_is_writable(const AVBufferRef *buf); + +/** + * @return the opaque parameter set by av_buffer_create. + */ +void *av_buffer_get_opaque(const AVBufferRef *buf); + +int av_buffer_get_ref_count(const AVBufferRef *buf); + +/** + * Create a writable reference from a given buffer reference, avoiding data copy + * if possible. + * + * @param buf buffer reference to make writable. On success, buf is either left + * untouched, or it is unreferenced and a new writable AVBufferRef is + * written in its place. On failure, buf is left untouched. 
+ * @return 0 on success, a negative AVERROR on failure. + */ +int av_buffer_make_writable(AVBufferRef **buf); + +/** + * Reallocate a given buffer. + * + * @param buf a buffer reference to reallocate. On success, buf will be + * unreferenced and a new reference with the required size will be + * written in its place. On failure buf will be left untouched. *buf + * may be NULL, then a new buffer is allocated. + * @param size required new buffer size. + * @return 0 on success, a negative AVERROR on failure. + * + * @note the buffer is actually reallocated with av_realloc() only if it was + * initially allocated through av_buffer_realloc(NULL) and there is only one + * reference to it (i.e. the one passed to this function). In all other cases + * a new buffer is allocated and the data is copied. + */ +int av_buffer_realloc(AVBufferRef **buf, int size); + +/** + * @} + */ + +/** + * @defgroup lavu_bufferpool AVBufferPool + * @ingroup lavu_data + * + * @{ + * AVBufferPool is an API for a lock-free thread-safe pool of AVBuffers. + * + * Frequently allocating and freeing large buffers may be slow. AVBufferPool is + * meant to solve this in cases when the caller needs a set of buffers of the + * same size (the most obvious use case being buffers for raw video or audio + * frames). + * + * At the beginning, the user must call av_buffer_pool_init() to create the + * buffer pool. Then whenever a buffer is needed, call av_buffer_pool_get() to + * get a reference to a new buffer, similar to av_buffer_alloc(). This new + * reference works in all aspects the same way as the one created by + * av_buffer_alloc(). However, when the last reference to this buffer is + * unreferenced, it is returned to the pool instead of being freed and will be + * reused for subsequent av_buffer_pool_get() calls. + * + * When the caller is done with the pool and no longer needs to allocate any new + * buffers, av_buffer_pool_uninit() must be called to mark the pool as freeable. 
+ * Once all the buffers are released, it will automatically be freed. + * + * Allocating and releasing buffers with this API is thread-safe as long as + * either the default alloc callback is used, or the user-supplied one is + * thread-safe. + */ + +/** + * The buffer pool. This structure is opaque and not meant to be accessed + * directly. It is allocated with av_buffer_pool_init() and freed with + * av_buffer_pool_uninit(). + */ +typedef struct AVBufferPool AVBufferPool; + +/** + * Allocate and initialize a buffer pool. + * + * @param size size of each buffer in this pool + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. May be NULL, then the default allocator will be used + * (av_buffer_alloc()). + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init(int size, AVBufferRef* (*alloc)(int size)); + +/** + * Allocate and initialize a buffer pool with a more complex allocator. + * + * @param size size of each buffer in this pool + * @param opaque arbitrary user data used by the allocator + * @param alloc a function that will be used to allocate new buffers when the + * pool is empty. + * @param pool_free a function that will be called immediately before the pool + * is freed. I.e. after av_buffer_pool_can_uninit() is called + * by the pool and all the frames are returned to the pool and + * freed. It is intended to uninitialize the user opaque data. + * @return newly created buffer pool on success, NULL on error. + */ +AVBufferPool *av_buffer_pool_init2(int size, void *opaque, + AVBufferRef* (*alloc)(void *opaque, int size), + void (*pool_free)(void *opaque)); + +/** + * Mark the pool as being available for freeing. It will actually be freed only + * once all the allocated buffers associated with the pool are released. Thus it + * is safe to call this function while some of the allocated buffers are still + * in use. + * + * @param pool pointer to the pool to be freed. 
It will be set to NULL. + */ +void av_buffer_pool_uninit(AVBufferPool **pool); + +/** + * Allocate a new AVBuffer, reusing an old buffer from the pool when available. + * This function may be called simultaneously from multiple threads. + * + * @return a reference to the new buffer on success, NULL on error. + */ +AVBufferRef *av_buffer_pool_get(AVBufferPool *pool); + +/** + * @} + */ + +#endif /* AVUTIL_BUFFER_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/camellia.h b/third-party/FFmpeg-iOS/include/libavutil/camellia.h new file mode 100644 index 0000000000..e674c9b9a4 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/camellia.h @@ -0,0 +1,70 @@ +/* + * An implementation of the CAMELLIA algorithm as mentioned in RFC3713 + * Copyright (c) 2014 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CAMELLIA_H
+#define AVUTIL_CAMELLIA_H
+
+#include <stdint.h>
+
+
+/**
+ * @file
+ * @brief Public header for libavutil CAMELLIA algorithm
+ * @defgroup lavu_camellia CAMELLIA
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_camellia_size;
+
+struct AVCAMELLIA;
+
+/**
+ * Allocate an AVCAMELLIA context
+ * To free the struct: av_free(ptr)
+ */
+struct AVCAMELLIA *av_camellia_alloc(void);
+
+/**
+ * Initialize an AVCAMELLIA context.
+ *
+ * @param ctx an AVCAMELLIA context
+ * @param key a key of 16, 24, 32 bytes used for encryption/decryption
+ * @param key_bits number of keybits: possible are 128, 192, 256
+ */
+int av_camellia_init(struct AVCAMELLIA *ctx, const uint8_t *key, int key_bits);
+
+/**
+ * Encrypt or decrypt a buffer using a previously initialized context
+ *
+ * @param ctx an AVCAMELLIA context
+ * @param dst destination array, can be equal to src
+ * @param src source array, can be equal to dst
+ * @param count number of 16 byte blocks
+ * @param iv initialization vector for CBC mode, NULL for ECB mode
+ * @param decrypt 0 for encryption, 1 for decryption
+ */
+void av_camellia_crypt(struct AVCAMELLIA *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt);
+
+/**
+ * @}
+ */
+#endif /* AVUTIL_CAMELLIA_H */
diff --git a/third-party/FFmpeg-iOS/include/libavutil/cast5.h b/third-party/FFmpeg-iOS/include/libavutil/cast5.h
new file mode 100644
index 0000000000..ad5b347e68
--- /dev/null
+++ b/third-party/FFmpeg-iOS/include/libavutil/cast5.h
@@ -0,0 +1,80 @@
+/*
+ * An implementation of the CAST128 algorithm as mentioned in RFC2144
+ * Copyright (c) 2014 Supraja Meedinti
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CAST5_H
+#define AVUTIL_CAST5_H
+
+#include <stdint.h>
+
+
+/**
+ * @file
+ * @brief Public header for libavutil CAST5 algorithm
+ * @defgroup lavu_cast5 CAST5
+ * @ingroup lavu_crypto
+ * @{
+ */
+
+extern const int av_cast5_size;
+
+struct AVCAST5;
+
+/**
+ * Allocate an AVCAST5 context
+ * To free the struct: av_free(ptr)
+ */
+struct AVCAST5 *av_cast5_alloc(void);
+/**
+ * Initialize an AVCAST5 context.
+ * + * @param ctx an AVCAST5 context + * @param key a key of 5,6,...16 bytes used for encryption/decryption + * @param key_bits number of keybits: possible are 40,48,...,128 + * @return 0 on success, less than 0 on failure + */ +int av_cast5_init(struct AVCAST5 *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, ECB mode only + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_cast5_crypt(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVCAST5 context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_cast5_crypt2(struct AVCAST5 *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); +/** + * @} + */ +#endif /* AVUTIL_CAST5_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h b/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h new file mode 100644 index 0000000000..ec7effead1 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/channel_layout.h @@ -0,0 +1,223 @@ +/* + * Copyright (c) 2006 Michael Niedermayer + * Copyright (c) 2008 Peter Ross + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_CHANNEL_LAYOUT_H
+#define AVUTIL_CHANNEL_LAYOUT_H
+
+#include <stdint.h>
+
+/**
+ * @file
+ * audio channel layout utility functions
+ */
+
+/**
+ * @addtogroup lavu_audio
+ * @{
+ */
+
+/**
+ * @defgroup channel_masks Audio channel masks
+ *
+ * A channel layout is a 64-bits integer with a bit set for every channel.
+ * The number of bits set must be equal to the number of channels.
+ * The value 0 means that the channel layout is not known.
+ * @note this data structure is not powerful enough to handle channels
+ * combinations that have the same channel multiple times, such as
+ * dual-mono.
+ *
+ * @{
+ */
+#define AV_CH_FRONT_LEFT 0x00000001
+#define AV_CH_FRONT_RIGHT 0x00000002
+#define AV_CH_FRONT_CENTER 0x00000004
+#define AV_CH_LOW_FREQUENCY 0x00000008
+#define AV_CH_BACK_LEFT 0x00000010
+#define AV_CH_BACK_RIGHT 0x00000020
+#define AV_CH_FRONT_LEFT_OF_CENTER 0x00000040
+#define AV_CH_FRONT_RIGHT_OF_CENTER 0x00000080
+#define AV_CH_BACK_CENTER 0x00000100
+#define AV_CH_SIDE_LEFT 0x00000200
+#define AV_CH_SIDE_RIGHT 0x00000400
+#define AV_CH_TOP_CENTER 0x00000800
+#define AV_CH_TOP_FRONT_LEFT 0x00001000
+#define AV_CH_TOP_FRONT_CENTER 0x00002000
+#define AV_CH_TOP_FRONT_RIGHT 0x00004000
+#define AV_CH_TOP_BACK_LEFT 0x00008000
+#define AV_CH_TOP_BACK_CENTER 0x00010000
+#define AV_CH_TOP_BACK_RIGHT 0x00020000
+#define AV_CH_STEREO_LEFT 0x20000000 ///< Stereo downmix.
+#define AV_CH_STEREO_RIGHT 0x40000000 ///< See AV_CH_STEREO_LEFT.
+#define AV_CH_WIDE_LEFT 0x0000000080000000ULL +#define AV_CH_WIDE_RIGHT 0x0000000100000000ULL +#define AV_CH_SURROUND_DIRECT_LEFT 0x0000000200000000ULL +#define AV_CH_SURROUND_DIRECT_RIGHT 0x0000000400000000ULL +#define AV_CH_LOW_FREQUENCY_2 0x0000000800000000ULL + +/** Channel mask value used for AVCodecContext.request_channel_layout + to indicate that the user requests the channel order of the decoder output + to be the native codec channel order. */ +#define AV_CH_LAYOUT_NATIVE 0x8000000000000000ULL + +/** + * @} + * @defgroup channel_mask_c Audio channel layouts + * @{ + * */ +#define AV_CH_LAYOUT_MONO (AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_STEREO (AV_CH_FRONT_LEFT|AV_CH_FRONT_RIGHT) +#define AV_CH_LAYOUT_2POINT1 (AV_CH_LAYOUT_STEREO|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_1 (AV_CH_LAYOUT_STEREO|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_SURROUND (AV_CH_LAYOUT_STEREO|AV_CH_FRONT_CENTER) +#define AV_CH_LAYOUT_3POINT1 (AV_CH_LAYOUT_SURROUND|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_4POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_4POINT1 (AV_CH_LAYOUT_4POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_2_2 (AV_CH_LAYOUT_STEREO|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_QUAD (AV_CH_LAYOUT_STEREO|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT0 (AV_CH_LAYOUT_SURROUND|AV_CH_SIDE_LEFT|AV_CH_SIDE_RIGHT) +#define AV_CH_LAYOUT_5POINT1 (AV_CH_LAYOUT_5POINT0|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_5POINT0_BACK (AV_CH_LAYOUT_SURROUND|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_5POINT1_BACK (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_6POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT0_FRONT (AV_CH_LAYOUT_2_2|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_HEXAGONAL (AV_CH_LAYOUT_5POINT0_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_BACK 
(AV_CH_LAYOUT_5POINT1_BACK|AV_CH_BACK_CENTER) +#define AV_CH_LAYOUT_6POINT1_FRONT (AV_CH_LAYOUT_6POINT0_FRONT|AV_CH_LOW_FREQUENCY) +#define AV_CH_LAYOUT_7POINT0 (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT0_FRONT (AV_CH_LAYOUT_5POINT0|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1 (AV_CH_LAYOUT_5POINT1|AV_CH_BACK_LEFT|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_7POINT1_WIDE (AV_CH_LAYOUT_5POINT1|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_7POINT1_WIDE_BACK (AV_CH_LAYOUT_5POINT1_BACK|AV_CH_FRONT_LEFT_OF_CENTER|AV_CH_FRONT_RIGHT_OF_CENTER) +#define AV_CH_LAYOUT_OCTAGONAL (AV_CH_LAYOUT_5POINT0|AV_CH_BACK_LEFT|AV_CH_BACK_CENTER|AV_CH_BACK_RIGHT) +#define AV_CH_LAYOUT_HEXADECAGONAL (AV_CH_LAYOUT_OCTAGONAL|AV_CH_WIDE_LEFT|AV_CH_WIDE_RIGHT|AV_CH_TOP_BACK_LEFT|AV_CH_TOP_BACK_RIGHT|AV_CH_TOP_BACK_CENTER|AV_CH_TOP_FRONT_CENTER|AV_CH_TOP_FRONT_LEFT|AV_CH_TOP_FRONT_RIGHT) +#define AV_CH_LAYOUT_STEREO_DOWNMIX (AV_CH_STEREO_LEFT|AV_CH_STEREO_RIGHT) + +enum AVMatrixEncoding { + AV_MATRIX_ENCODING_NONE, + AV_MATRIX_ENCODING_DOLBY, + AV_MATRIX_ENCODING_DPLII, + AV_MATRIX_ENCODING_DPLIIX, + AV_MATRIX_ENCODING_DPLIIZ, + AV_MATRIX_ENCODING_DOLBYEX, + AV_MATRIX_ENCODING_DOLBYHEADPHONE, + AV_MATRIX_ENCODING_NB +}; + +/** + * Return a channel layout id that matches name, or 0 if no match is found. 
+ * + * name can be one or several of the following notations, + * separated by '+' or '|': + * - the name of an usual channel layout (mono, stereo, 4.0, quad, 5.0, + * 5.0(side), 5.1, 5.1(side), 7.1, 7.1(wide), downmix); + * - the name of a single channel (FL, FR, FC, LFE, BL, BR, FLC, FRC, BC, + * SL, SR, TC, TFL, TFC, TFR, TBL, TBC, TBR, DL, DR); + * - a number of channels, in decimal, optionally followed by 'c', yielding + * the default channel layout for that number of channels (@see + * av_get_default_channel_layout); + * - a channel layout mask, in hexadecimal starting with "0x" (see the + * AV_CH_* macros). + * + * @warning Starting from the next major bump the trailing character + * 'c' to specify a number of channels will be required, while a + * channel layout mask could also be specified as a decimal number + * (if and only if not followed by "c"). + * + * Example: "stereo+FC" = "2c+FC" = "2c+1c" = "0x7" + */ +uint64_t av_get_channel_layout(const char *name); + +/** + * Return a description of a channel layout. + * If nb_channels is <= 0, it is guessed from the channel_layout. + * + * @param buf put here the string containing the channel layout + * @param buf_size size in bytes of the buffer + */ +void av_get_channel_layout_string(char *buf, int buf_size, int nb_channels, uint64_t channel_layout); + +struct AVBPrint; +/** + * Append a description of a channel layout to a bprint buffer. + */ +void av_bprint_channel_layout(struct AVBPrint *bp, int nb_channels, uint64_t channel_layout); + +/** + * Return the number of channels in the channel layout. + */ +int av_get_channel_layout_nb_channels(uint64_t channel_layout); + +/** + * Return default channel layout for a given number of channels. + */ +int64_t av_get_default_channel_layout(int nb_channels); + +/** + * Get the index of a channel in channel_layout. + * + * @param channel a channel layout describing exactly one channel which must be + * present in channel_layout. 
+ * + * @return index of channel in channel_layout on success, a negative AVERROR + * on error. + */ +int av_get_channel_layout_channel_index(uint64_t channel_layout, + uint64_t channel); + +/** + * Get the channel with the given index in channel_layout. + */ +uint64_t av_channel_layout_extract_channel(uint64_t channel_layout, int index); + +/** + * Get the name of a given channel. + * + * @return channel name on success, NULL on error. + */ +const char *av_get_channel_name(uint64_t channel); + +/** + * Get the description of a given channel. + * + * @param channel a channel layout with a single channel + * @return channel description on success, NULL on error + */ +const char *av_get_channel_description(uint64_t channel); + +/** + * Get the value and name of a standard channel layout. + * + * @param[in] index index in an internal list, starting at 0 + * @param[out] layout channel layout mask + * @param[out] name name of the layout + * @return 0 if the layout exists, + * <0 if index is beyond the limits + */ +int av_get_standard_channel_layout(unsigned index, uint64_t *layout, + const char **name); + +/** + * @} + * @} + */ + +#endif /* AVUTIL_CHANNEL_LAYOUT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/common.h b/third-party/FFmpeg-iOS/include/libavutil/common.h new file mode 100644 index 0000000000..c9b3f21a44 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/common.h @@ -0,0 +1,530 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * common internal and external API header
+ */
+
+#ifndef AVUTIL_COMMON_H
+#define AVUTIL_COMMON_H
+
+#if defined(__cplusplus) && !defined(__STDC_CONSTANT_MACROS) && !defined(UINT64_C)
+#error missing -D__STDC_CONSTANT_MACROS / #define __STDC_CONSTANT_MACROS
+#endif
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <math.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "attributes.h"
+#include "macros.h"
+#include "version.h"
+#include "../libavutil/avconfig.h"
+
+#if AV_HAVE_BIGENDIAN
+# define AV_NE(be, le) (be)
+#else
+# define AV_NE(be, le) (le)
+#endif
+
+//rounded division & shift
+#define RSHIFT(a,b) ((a) > 0 ? ((a) + ((1<<(b))>>1))>>(b) : ((a) + ((1<<(b))>>1)-1)>>(b))
+/* assume b>0 */
+#define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b))
+/* Fast a/(1<<b) rounded toward +inf. Assume a>=0 and b>=0 */
+#define AV_CEIL_RSHIFT(a,b) (!av_builtin_constant_p(b) ? -((-(a)) >> (b)) \
+                                                       : ((a) + (1<<(b)) - 1) >> (b))
+/* Backwards compat. */
+#define FF_CEIL_RSHIFT AV_CEIL_RSHIFT
+
+#define FFUDIV(a,b) (((a)>0 ?(a):(a)-(b)+1) / (b))
+#define FFUMOD(a,b) ((a)-(b)*FFUDIV(a,b))
+
+/**
+ * Absolute value, Note, INT_MIN / INT64_MIN result in undefined behavior as they
+ * are not representable as absolute values of their type. This is the same
+ * as with *abs()
+ * @see FFNABS()
+ */
+#define FFABS(a) ((a) >= 0 ? (a) : (-(a)))
+#define FFSIGN(a) ((a) > 0 ? 1 : -1)
+
+/**
+ * Negative Absolute value.
+ * this works for all integers of all types.
+ * As with many macros, this evaluates its argument twice, it thus must not have
+ * a sideeffect, that is FFNABS(x++) has undefined behavior.
+ */
+#define FFNABS(a) ((a) <= 0 ? (a) : (-(a)))
+
+/**
+ * Comparator.
+ * For two numerical expressions x and y, gives 1 if x > y, -1 if x < y, and 0 + * if x == y. This is useful for instance in a qsort comparator callback. + * Furthermore, compilers are able to optimize this to branchless code, and + * there is no risk of overflow with signed types. + * As with many macros, this evaluates its argument multiple times, it thus + * must not have a side-effect. + */ +#define FFDIFFSIGN(x,y) (((x)>(y)) - ((x)<(y))) + +#define FFMAX(a,b) ((a) > (b) ? (a) : (b)) +#define FFMAX3(a,b,c) FFMAX(FFMAX(a,b),c) +#define FFMIN(a,b) ((a) > (b) ? (b) : (a)) +#define FFMIN3(a,b,c) FFMIN(FFMIN(a,b),c) + +#define FFSWAP(type,a,b) do{type SWAP_tmp= b; b= a; a= SWAP_tmp;}while(0) +#define FF_ARRAY_ELEMS(a) (sizeof(a) / sizeof((a)[0])) + +/* misc math functions */ + +#ifdef HAVE_AV_CONFIG_H +# include "config.h" +# include "intmath.h" +#endif + +/* Pull in unguarded fallback defines at the end of this file. */ +#include "common.h" + +#ifndef av_log2 +av_const int av_log2(unsigned v); +#endif + +#ifndef av_log2_16bit +av_const int av_log2_16bit(unsigned v); +#endif + +/** + * Clip a signed integer value into the amin-amax range. + * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int av_clip_c(int a, int amin, int amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed 64bit integer value into the amin-amax range. 
+ * @param a value to clip + * @param amin minimum value of the clip range + * @param amax maximum value of the clip range + * @return clipped value + */ +static av_always_inline av_const int64_t av_clip64_c(int64_t a, int64_t amin, int64_t amax) +{ +#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2 + if (amin > amax) abort(); +#endif + if (a < amin) return amin; + else if (a > amax) return amax; + else return a; +} + +/** + * Clip a signed integer value into the 0-255 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint8_t av_clip_uint8_c(int a) +{ + if (a&(~0xFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -128,127 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int8_t av_clip_int8_c(int a) +{ + if ((a+0x80U) & ~0xFF) return (a>>31) ^ 0x7F; + else return a; +} + +/** + * Clip a signed integer value into the 0-65535 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const uint16_t av_clip_uint16_c(int a) +{ + if (a&(~0xFFFF)) return (-a)>>31; + else return a; +} + +/** + * Clip a signed integer value into the -32768,32767 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int16_t av_clip_int16_c(int a) +{ + if ((a+0x8000U) & ~0xFFFF) return (a>>31) ^ 0x7FFF; + else return a; +} + +/** + * Clip a signed 64-bit integer value into the -2147483648,2147483647 range. + * @param a value to clip + * @return clipped value + */ +static av_always_inline av_const int32_t av_clipl_int32_c(int64_t a) +{ + if ((a+0x80000000u) & ~UINT64_C(0xFFFFFFFF)) return (int32_t)((a>>63) ^ 0x7FFFFFFF); + else return (int32_t)a; +} + +/** + * Clip a signed integer into the -(2^p),(2^p-1) range. 
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const int av_clip_intp2_c(int a, int p)
+{
+    if (((unsigned)a + (1 << p)) & ~((2 << p) - 1))
+        return (a >> 31) ^ ((1 << p) - 1);
+    else
+        return a;
+}
+
+/**
+ * Clip a signed integer to an unsigned power of two range.
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_clip_uintp2_c(int a, int p)
+{
+    if (a & ~((1<<p) - 1)) return -a >> 31 & ((1<<p) - 1);
+    else                   return  a;
+}
+
+/**
+ * Clear high bits from an unsigned integer starting with specific bit position
+ * @param a value to clip
+ * @param p bit position to clip at
+ * @return clipped value
+ */
+static av_always_inline av_const unsigned av_mod_uintp2_c(unsigned a, unsigned p)
+{
+    return a & ((1 << p) - 1);
+}
+
+/**
+ * Add two signed 32-bit values with saturation.
+ *
+ * @param a one value
+ * @param b another value
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_add32_c(int a, int b)
+{
+    return av_clipl_int32((int64_t)a + b);
+}
+
+/**
+ * Add a doubled value to another value with saturation at both stages.
+ *
+ * @param a first value
+ * @param b value doubled and added to a
+ * @return sum with signed saturation
+ */
+static av_always_inline int av_sat_dadd32_c(int a, int b)
+{
+    return av_sat_add32(a, av_sat_add32(b, b));
+}
+
+/**
+ * Clip a float value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const float av_clipf_c(float a, float amin, float amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if (a < amin) return amin;
+    else if (a > amax) return amax;
+    else return a;
+}
+
+/**
+ * Clip a double value into the amin-amax range.
+ * @param a value to clip
+ * @param amin minimum value of the clip range
+ * @param amax maximum value of the clip range
+ * @return clipped value
+ */
+static av_always_inline av_const double av_clipd_c(double a, double amin, double amax)
+{
+#if defined(HAVE_AV_CONFIG_H) && defined(ASSERT_LEVEL) && ASSERT_LEVEL >= 2
+    if (amin > amax) abort();
+#endif
+    if (a < amin) return amin;
+    else if (a > amax) return amax;
+    else return a;
+}
+
+/** Compute ceil(log2(x)).
+ * @param x value used to compute ceil(log2(x)) + * @return computed ceiling of log2(x) + */ +static av_always_inline av_const int av_ceil_log2_c(int x) +{ + return av_log2((x - 1) << 1); +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount_c(uint32_t x) +{ + x -= (x >> 1) & 0x55555555; + x = (x & 0x33333333) + ((x >> 2) & 0x33333333); + x = (x + (x >> 4)) & 0x0F0F0F0F; + x += x >> 8; + return (x + (x >> 16)) & 0x3F; +} + +/** + * Count number of bits set to one in x + * @param x value to count bits of + * @return the number of bits set to one in x + */ +static av_always_inline av_const int av_popcount64_c(uint64_t x) +{ + return av_popcount((uint32_t)x) + av_popcount((uint32_t)(x >> 32)); +} + +static av_always_inline av_const int av_parity_c(uint32_t v) +{ + return av_popcount(v) & 1; +} + +#define MKTAG(a,b,c,d) ((a) | ((b) << 8) | ((c) << 16) | ((unsigned)(d) << 24)) +#define MKBETAG(a,b,c,d) ((d) | ((c) << 8) | ((b) << 16) | ((unsigned)(a) << 24)) + +/** + * Convert a UTF-8 character (up to 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_BYTE Expression reading one byte from the input. + * Evaluated up to 7 times (4 for the currently + * assigned Unicode range). With a memory buffer + * input, this could be *ptr++. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + * + * @warning ERROR should not contain a loop control statement which + * could interact with the internal while loop, and should force an + * exit from the macro code (e.g. through a goto or a return) in order + * to prevent undefined results. 
+ */ +#define GET_UTF8(val, GET_BYTE, ERROR)\ + val= (GET_BYTE);\ + {\ + uint32_t top = (val & 128) >> 1;\ + if ((val & 0xc0) == 0x80 || val >= 0xFE)\ + ERROR\ + while (val & top) {\ + int tmp= (GET_BYTE) - 128;\ + if(tmp>>6)\ + ERROR\ + val= (val<<6) + tmp;\ + top <<= 5;\ + }\ + val &= (top << 1) - 1;\ + } + +/** + * Convert a UTF-16 character (2 or 4 bytes) to its 32-bit UCS-4 encoded form. + * + * @param val Output value, must be an lvalue of type uint32_t. + * @param GET_16BIT Expression returning two bytes of UTF-16 data converted + * to native byte order. Evaluated one or two times. + * @param ERROR Expression to be evaluated on invalid input, + * typically a goto statement. + */ +#define GET_UTF16(val, GET_16BIT, ERROR)\ + val = GET_16BIT;\ + {\ + unsigned int hi = val - 0xD800;\ + if (hi < 0x800) {\ + val = GET_16BIT - 0xDC00;\ + if (val > 0x3FFU || hi > 0x3FFU)\ + ERROR\ + val += (hi<<10) + 0x10000;\ + }\ + }\ + +/** + * @def PUT_UTF8(val, tmp, PUT_BYTE) + * Convert a 32-bit Unicode character to its UTF-8 encoded form (up to 4 bytes long). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-8. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint8_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_BYTE. + * @param PUT_BYTE writes the converted UTF-8 bytes to any proper destination. + * It could be a function or a statement, and uses tmp as the input byte. + * For example, PUT_BYTE could be "*output++ = tmp;" PUT_BYTE will be + * executed up to 4 times for values in the valid UTF-8 range and up to + * 7 times in the general case, depending on the length of the converted + * Unicode character. 
+ */ +#define PUT_UTF8(val, tmp, PUT_BYTE)\ + {\ + int bytes, shift;\ + uint32_t in = val;\ + if (in < 0x80) {\ + tmp = in;\ + PUT_BYTE\ + } else {\ + bytes = (av_log2(in) + 4) / 5;\ + shift = (bytes - 1) * 6;\ + tmp = (256 - (256 >> bytes)) | (in >> shift);\ + PUT_BYTE\ + while (shift >= 6) {\ + shift -= 6;\ + tmp = 0x80 | ((in >> shift) & 0x3f);\ + PUT_BYTE\ + }\ + }\ + } + +/** + * @def PUT_UTF16(val, tmp, PUT_16BIT) + * Convert a 32-bit Unicode character to its UTF-16 encoded form (2 or 4 bytes). + * @param val is an input-only argument and should be of type uint32_t. It holds + * a UCS-4 encoded Unicode character that is to be converted to UTF-16. If + * val is given as a function it is executed only once. + * @param tmp is a temporary variable and should be of type uint16_t. It + * represents an intermediate value during conversion that is to be + * output by PUT_16BIT. + * @param PUT_16BIT writes the converted UTF-16 data to any proper destination + * in desired endianness. It could be a function or a statement, and uses tmp + * as the input byte. For example, PUT_BYTE could be "*output++ = tmp;" + * PUT_BYTE will be executed 1 or 2 times depending on input character. + */ +#define PUT_UTF16(val, tmp, PUT_16BIT)\ + {\ + uint32_t in = val;\ + if (in < 0x10000) {\ + tmp = in;\ + PUT_16BIT\ + } else {\ + tmp = 0xD800 | ((in - 0x10000) >> 10);\ + PUT_16BIT\ + tmp = 0xDC00 | ((in - 0x10000) & 0x3FF);\ + PUT_16BIT\ + }\ + }\ + + + +#include "mem.h" + +#ifdef HAVE_AV_CONFIG_H +# include "internal.h" +#endif /* HAVE_AV_CONFIG_H */ + +#endif /* AVUTIL_COMMON_H */ + +/* + * The following definitions are outside the multiple inclusion guard + * to ensure they are immediately available in intmath.h. 
+ */ + +#ifndef av_ceil_log2 +# define av_ceil_log2 av_ceil_log2_c +#endif +#ifndef av_clip +# define av_clip av_clip_c +#endif +#ifndef av_clip64 +# define av_clip64 av_clip64_c +#endif +#ifndef av_clip_uint8 +# define av_clip_uint8 av_clip_uint8_c +#endif +#ifndef av_clip_int8 +# define av_clip_int8 av_clip_int8_c +#endif +#ifndef av_clip_uint16 +# define av_clip_uint16 av_clip_uint16_c +#endif +#ifndef av_clip_int16 +# define av_clip_int16 av_clip_int16_c +#endif +#ifndef av_clipl_int32 +# define av_clipl_int32 av_clipl_int32_c +#endif +#ifndef av_clip_intp2 +# define av_clip_intp2 av_clip_intp2_c +#endif +#ifndef av_clip_uintp2 +# define av_clip_uintp2 av_clip_uintp2_c +#endif +#ifndef av_mod_uintp2 +# define av_mod_uintp2 av_mod_uintp2_c +#endif +#ifndef av_sat_add32 +# define av_sat_add32 av_sat_add32_c +#endif +#ifndef av_sat_dadd32 +# define av_sat_dadd32 av_sat_dadd32_c +#endif +#ifndef av_clipf +# define av_clipf av_clipf_c +#endif +#ifndef av_clipd +# define av_clipd av_clipd_c +#endif +#ifndef av_popcount +# define av_popcount av_popcount_c +#endif +#ifndef av_popcount64 +# define av_popcount64 av_popcount64_c +#endif +#ifndef av_parity +# define av_parity av_parity_c +#endif diff --git a/third-party/FFmpeg-iOS/include/libavutil/cpu.h b/third-party/FFmpeg-iOS/include/libavutil/cpu.h new file mode 100644 index 0000000000..4bff16714a --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/cpu.h @@ -0,0 +1,117 @@ +/* + * Copyright (c) 2000, 2001, 2002 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CPU_H +#define AVUTIL_CPU_H + +#include "attributes.h" + +#define AV_CPU_FLAG_FORCE 0x80000000 /* force usage of selected flags (OR) */ + + /* lower 16 bits - CPU features */ +#define AV_CPU_FLAG_MMX 0x0001 ///< standard MMX +#define AV_CPU_FLAG_MMXEXT 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_MMX2 0x0002 ///< SSE integer functions or AMD MMX ext +#define AV_CPU_FLAG_3DNOW 0x0004 ///< AMD 3DNOW +#define AV_CPU_FLAG_SSE 0x0008 ///< SSE functions +#define AV_CPU_FLAG_SSE2 0x0010 ///< PIV SSE2 functions +#define AV_CPU_FLAG_SSE2SLOW 0x40000000 ///< SSE2 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_3DNOWEXT 0x0020 ///< AMD 3DNowExt +#define AV_CPU_FLAG_SSE3 0x0040 ///< Prescott SSE3 functions +#define AV_CPU_FLAG_SSE3SLOW 0x20000000 ///< SSE3 supported, but usually not faster + ///< than regular MMX/SSE (e.g. Core1) +#define AV_CPU_FLAG_SSSE3 0x0080 ///< Conroe SSSE3 functions +#define AV_CPU_FLAG_ATOM 0x10000000 ///< Atom processor, some SSSE3 instructions are slower +#define AV_CPU_FLAG_SSE4 0x0100 ///< Penryn SSE4.1 functions +#define AV_CPU_FLAG_SSE42 0x0200 ///< Nehalem SSE4.2 functions +#define AV_CPU_FLAG_AESNI 0x80000 ///< Advanced Encryption Standard functions +#define AV_CPU_FLAG_AVX 0x4000 ///< AVX functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_AVXSLOW 0x8000000 ///< AVX supported, but slow when using YMM registers (e.g. 
Bulldozer) +#define AV_CPU_FLAG_XOP 0x0400 ///< Bulldozer XOP functions +#define AV_CPU_FLAG_FMA4 0x0800 ///< Bulldozer FMA4 functions +#define AV_CPU_FLAG_CMOV 0x1000 ///< supports cmov instruction +#define AV_CPU_FLAG_AVX2 0x8000 ///< AVX2 functions: requires OS support even if YMM registers aren't used +#define AV_CPU_FLAG_FMA3 0x10000 ///< Haswell FMA3 functions +#define AV_CPU_FLAG_BMI1 0x20000 ///< Bit Manipulation Instruction Set 1 +#define AV_CPU_FLAG_BMI2 0x40000 ///< Bit Manipulation Instruction Set 2 + +#define AV_CPU_FLAG_ALTIVEC 0x0001 ///< standard +#define AV_CPU_FLAG_VSX 0x0002 ///< ISA 2.06 +#define AV_CPU_FLAG_POWER8 0x0004 ///< ISA 2.07 + +#define AV_CPU_FLAG_ARMV5TE (1 << 0) +#define AV_CPU_FLAG_ARMV6 (1 << 1) +#define AV_CPU_FLAG_ARMV6T2 (1 << 2) +#define AV_CPU_FLAG_VFP (1 << 3) +#define AV_CPU_FLAG_VFPV3 (1 << 4) +#define AV_CPU_FLAG_NEON (1 << 5) +#define AV_CPU_FLAG_ARMV8 (1 << 6) +#define AV_CPU_FLAG_VFP_VM (1 << 7) ///< VFPv2 vector mode, deprecated in ARMv7-A and unavailable in various CPUs implementations +#define AV_CPU_FLAG_SETEND (1 <<16) + +/** + * Return the flags which specify extensions supported by the CPU. + * The returned value is affected by av_force_cpu_flags() if that was used + * before. So av_get_cpu_flags() can easily be used in an application to + * detect the enabled cpu flags. + */ +int av_get_cpu_flags(void); + +/** + * Disables cpu detection and forces the specified flags. + * -1 is a special case that disables forcing of specific flags. + */ +void av_force_cpu_flags(int flags); + +/** + * Set a mask on flags returned by av_get_cpu_flags(). + * This function is mainly useful for testing. + * Please use av_force_cpu_flags() and av_get_cpu_flags() instead which are more flexible + * + * @warning this function is not thread safe. + */ +attribute_deprecated void av_set_cpu_flags_mask(int mask); + +/** + * Parse CPU flags from a string. 
+ * + * The returned flags contain the specified flags as well as related unspecified flags. + * + * This function exists only for compatibility with libav. + * Please use av_parse_cpu_caps() when possible. + * @return a combination of AV_CPU_* flags, negative on error. + */ +attribute_deprecated +int av_parse_cpu_flags(const char *s); + +/** + * Parse CPU caps from a string and update the given AV_CPU_* flags based on that. + * + * @return negative on error. + */ +int av_parse_cpu_caps(unsigned *flags, const char *s); + +/** + * @return the number of logical CPU cores present. + */ +int av_cpu_count(void); + +#endif /* AVUTIL_CPU_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/crc.h b/third-party/FFmpeg-iOS/include/libavutil/crc.h new file mode 100644 index 0000000000..ef8a7137e4 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/crc.h @@ -0,0 +1,91 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_CRC_H +#define AVUTIL_CRC_H + +#include +#include +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_crc32 CRC32 + * @ingroup lavu_crypto + * @{ + */ + +typedef uint32_t AVCRC; + +typedef enum { + AV_CRC_8_ATM, + AV_CRC_16_ANSI, + AV_CRC_16_CCITT, + AV_CRC_32_IEEE, + AV_CRC_32_IEEE_LE, /*< reversed bitorder version of AV_CRC_32_IEEE */ + AV_CRC_16_ANSI_LE, /*< reversed bitorder version of AV_CRC_16_ANSI */ +#if FF_API_CRC_BIG_TABLE + AV_CRC_24_IEEE = 12, +#else + AV_CRC_24_IEEE, +#endif /* FF_API_CRC_BIG_TABLE */ + AV_CRC_MAX, /*< Not part of public API! Do not use outside libavutil. */ +}AVCRCId; + +/** + * Initialize a CRC table. + * @param ctx must be an array of size sizeof(AVCRC)*257 or sizeof(AVCRC)*1024 + * @param le If 1, the lowest bit represents the coefficient for the highest + * exponent of the corresponding polynomial (both for poly and + * actual CRC). + * If 0, you must swap the CRC parameter and the result of av_crc + * if you need the standard representation (can be simplified in + * most cases to e.g. bswap16): + * av_bswap32(crc << (32-bits)) + * @param bits number of bits for the CRC + * @param poly generator polynomial without the x**bits coefficient, in the + * representation as specified by le + * @param ctx_size size of ctx in bytes + * @return <0 on failure + */ +int av_crc_init(AVCRC *ctx, int le, int bits, uint32_t poly, int ctx_size); + +/** + * Get an initialized standard CRC table. + * @param crc_id ID of a standard CRC + * @return a pointer to the CRC table or NULL on failure + */ +const AVCRC *av_crc_get_table(AVCRCId crc_id); + +/** + * Calculate the CRC of a block. 
+ * @param crc CRC of previous blocks if any or initial value for CRC + * @return CRC updated with the data from the given block + * + * @see av_crc_init() "le" parameter + */ +uint32_t av_crc(const AVCRC *ctx, uint32_t crc, + const uint8_t *buffer, size_t length) av_pure; + +/** + * @} + */ + +#endif /* AVUTIL_CRC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/des.h b/third-party/FFmpeg-iOS/include/libavutil/des.h new file mode 100644 index 0000000000..4cf11f5bca --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/des.h @@ -0,0 +1,77 @@ +/* + * DES encryption/decryption + * Copyright (c) 2007 Reimar Doeffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DES_H +#define AVUTIL_DES_H + +#include + +/** + * @defgroup lavu_des DES + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVDES { + uint64_t round_keys[3][16]; + int triple_des; +} AVDES; + +/** + * Allocate an AVDES context. + */ +AVDES *av_des_alloc(void); + +/** + * @brief Initializes an AVDES context. 
+ * + * @param key_bits must be 64 or 192 + * @param decrypt 0 for encryption/CBC-MAC, 1 for decryption + * @return zero on success, negative value otherwise + */ +int av_des_init(struct AVDES *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + * @param iv initialization vector for CBC mode, if NULL then ECB will be used, + * must be 8-byte aligned + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_des_crypt(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @brief Calculates CBC-MAC using the DES algorithm. + * + * @param count number of 8 byte blocks + * @param dst destination array, can be equal to src, must be 8-byte aligned + * @param src source array, can be equal to dst, must be 8-byte aligned, may be NULL + */ +void av_des_mac(struct AVDES *d, uint8_t *dst, const uint8_t *src, int count); + +/** + * @} + */ + +#endif /* AVUTIL_DES_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/dict.h b/third-party/FFmpeg-iOS/include/libavutil/dict.h new file mode 100644 index 0000000000..118f1f00ed --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/dict.h @@ -0,0 +1,200 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Public dictionary API. + * @deprecated + * AVDictionary is provided for compatibility with libav. It is both in + * implementation as well as API inefficient. It does not scale and is + * extremely slow with large dictionaries. + * It is recommended that new code uses our tree container from tree.c/h + * where applicable, which uses AVL trees to achieve O(log n) performance. + */ + +#ifndef AVUTIL_DICT_H +#define AVUTIL_DICT_H + +#include + +#include "version.h" + +/** + * @addtogroup lavu_dict AVDictionary + * @ingroup lavu_data + * + * @brief Simple key:value store + * + * @{ + * Dictionaries are used for storing key:value pairs. To create + * an AVDictionary, simply pass an address of a NULL pointer to + * av_dict_set(). NULL can be used as an empty dictionary wherever + * a pointer to an AVDictionary is required. + * Use av_dict_get() to retrieve an entry or iterate over all + * entries and finally av_dict_free() to free the dictionary + * and all its contents. + * + @code + AVDictionary *d = NULL; // "create" an empty dictionary + AVDictionaryEntry *t = NULL; + + av_dict_set(&d, "foo", "bar", 0); // add an entry + + char *k = av_strdup("key"); // if your strings are already allocated, + char *v = av_strdup("value"); // you can avoid copying them like this + av_dict_set(&d, k, v, AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL); + + while (t = av_dict_get(d, "", t, AV_DICT_IGNORE_SUFFIX)) { + <....> // iterate over all entries in d + } + av_dict_free(&d); + @endcode + */ + +#define AV_DICT_MATCH_CASE 1 /**< Only get an entry with exact-case key match. Only relevant in av_dict_get(). 
*/ +#define AV_DICT_IGNORE_SUFFIX 2 /**< Return first entry in a dictionary whose first part corresponds to the search key, + ignoring the suffix of the found key string. Only relevant in av_dict_get(). */ +#define AV_DICT_DONT_STRDUP_KEY 4 /**< Take ownership of a key that's been + allocated with av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_STRDUP_VAL 8 /**< Take ownership of a value that's been + allocated with av_malloc() or another memory allocation function. */ +#define AV_DICT_DONT_OVERWRITE 16 ///< Don't overwrite existing entries. +#define AV_DICT_APPEND 32 /**< If the entry already exists, append to it. Note that no + delimiter is added, the strings are simply concatenated. */ +#define AV_DICT_MULTIKEY 64 /**< Allow to store several equal keys in the dictionary */ + +typedef struct AVDictionaryEntry { + char *key; + char *value; +} AVDictionaryEntry; + +typedef struct AVDictionary AVDictionary; + +/** + * Get a dictionary entry with matching key. + * + * The returned entry key or value must not be changed, or it will + * cause undefined behavior. + * + * To iterate through all the dictionary entries, you can set the matching key + * to the null string "" and set the AV_DICT_IGNORE_SUFFIX flag. + * + * @param prev Set to the previous matching element to find the next. + * If set to NULL the first matching element is returned. + * @param key matching key + * @param flags a collection of AV_DICT_* flags controlling how the entry is retrieved + * @return found entry or NULL in case no matching entry was found in the dictionary + */ +AVDictionaryEntry *av_dict_get(const AVDictionary *m, const char *key, + const AVDictionaryEntry *prev, int flags); + +/** + * Get number of entries in dictionary. + * + * @param m dictionary + * @return number of entries in dictionary + */ +int av_dict_count(const AVDictionary *m); + +/** + * Set the given entry in *pm, overwriting an existing entry. 
+ * + * Note: If AV_DICT_DONT_STRDUP_KEY or AV_DICT_DONT_STRDUP_VAL is set, + * these arguments will be freed on error. + * + * Warning: Adding a new entry to a dictionary invalidates all existing entries + * previously returned with av_dict_get. + * + * @param pm pointer to a pointer to a dictionary struct. If *pm is NULL + * a dictionary struct is allocated and put in *pm. + * @param key entry key to add to *pm (will either be av_strduped or added as a new key depending on flags) + * @param value entry value to add to *pm (will be av_strduped or added as a new key depending on flags). + * Passing a NULL value will cause an existing entry to be deleted. + * @return >= 0 on success otherwise an error code <0 + */ +int av_dict_set(AVDictionary **pm, const char *key, const char *value, int flags); + +/** + * Convenience wrapper for av_dict_set that converts the value to a string + * and stores it. + * + * Note: If AV_DICT_DONT_STRDUP_KEY is set, key will be freed on error. + */ +int av_dict_set_int(AVDictionary **pm, const char *key, int64_t value, int flags); + +/** + * Parse the key/value pairs list and add the parsed entries to a dictionary. + * + * In case of failure, all the successfully set entries are stored in + * *pm. You may need to manually free the created dictionary. + * + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @param flags flags to use when adding to dictionary. + * AV_DICT_DONT_STRDUP_KEY and AV_DICT_DONT_STRDUP_VAL + * are ignored since the key/value tokens will always + * be duplicated. + * @return 0 on success, negative AVERROR code on failure + */ +int av_dict_parse_string(AVDictionary **pm, const char *str, + const char *key_val_sep, const char *pairs_sep, + int flags); + +/** + * Copy entries from one AVDictionary struct into another. 
+ * @param dst pointer to a pointer to a AVDictionary struct. If *dst is NULL, + * this function will allocate a struct for you and put it in *dst + * @param src pointer to source AVDictionary struct + * @param flags flags to use when setting entries in *dst + * @note metadata is read using the AV_DICT_IGNORE_SUFFIX flag + * @return 0 on success, negative AVERROR code on failure. If dst was allocated + * by this function, callers should free the associated memory. + */ +int av_dict_copy(AVDictionary **dst, const AVDictionary *src, int flags); + +/** + * Free all the memory allocated for an AVDictionary struct + * and all keys and values. + */ +void av_dict_free(AVDictionary **m); + +/** + * Get dictionary entries as a string. + * + * Create a string containing dictionary's entries. + * Such string may be passed back to av_dict_parse_string(). + * @note String is escaped with backslashes ('\'). + * + * @param[in] m dictionary + * @param[out] buffer Pointer to buffer that will be allocated with string containg entries. + * Buffer must be freed by the caller when is no longer needed. + * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. + */ +int av_dict_get_string(const AVDictionary *m, char **buffer, + const char key_val_sep, const char pairs_sep); + +/** + * @} + */ + +#endif /* AVUTIL_DICT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/display.h b/third-party/FFmpeg-iOS/include/libavutil/display.h new file mode 100644 index 0000000000..39c15ee6b8 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/display.h @@ -0,0 +1,87 @@ +/* + * Copyright (c) 2014 Vittorio Giovara + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DISPLAY_H +#define AVUTIL_DISPLAY_H + +#include +#include "common.h" + +/** + * The display transformation matrix specifies an affine transformation that + * should be applied to video frames for correct presentation. It is compatible + * with the matrices stored in the ISO/IEC 14496-12 container format. + * + * The data is a 3x3 matrix represented as a 9-element array: + * + * | a b u | + * (a, b, u, c, d, v, x, y, w) -> | c d v | + * | x y w | + * + * All numbers are stored in native endianness, as 16.16 fixed-point values, + * except for u, v and w, which are stored as 2.30 fixed-point values. + * + * The transformation maps a point (p, q) in the source (pre-transformation) + * frame to the point (p', q') in the destination (post-transformation) frame as + * follows: + * | a b u | + * (p, q, 1) . | c d v | = z * (p', q', 1) + * | x y w | + * + * The transformation can also be more explicitly written in components as + * follows: + * p' = (a * p + c * q + x) / z; + * q' = (b * p + d * q + y) / z; + * z = u * p + v * q + w + */ + +/** + * Extract the rotation component of the transformation matrix. 
+ * + * @param matrix the transformation matrix + * @return the angle (in degrees) by which the transformation rotates the frame + * counterclockwise. The angle will be in range [-180.0, 180.0], + * or NaN if the matrix is singular. + * + * @note floating point numbers are inherently inexact, so callers are + * recommended to round the return value to nearest integer before use. + */ +double av_display_rotation_get(const int32_t matrix[9]); + +/** + * Initialize a transformation matrix describing a pure counterclockwise + * rotation by the specified angle (in degrees). + * + * @param matrix an allocated transformation matrix (will be fully overwritten + * by this function) + * @param angle rotation angle in degrees. + */ +void av_display_rotation_set(int32_t matrix[9], double angle); + +/** + * Flip the input matrix horizontally and/or vertically. + * + * @param matrix an allocated transformation matrix + * @param hflip whether the matrix should be flipped horizontally + * @param vflip whether the matrix should be flipped vertically + */ +void av_display_matrix_flip(int32_t matrix[9], int hflip, int vflip); + +#endif /* AVUTIL_DISPLAY_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/downmix_info.h b/third-party/FFmpeg-iOS/include/libavutil/downmix_info.h new file mode 100644 index 0000000000..221cf5bf9b --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/downmix_info.h @@ -0,0 +1,115 @@ +/* + * Copyright (c) 2014 Tim Walker + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_DOWNMIX_INFO_H +#define AVUTIL_DOWNMIX_INFO_H + +#include "frame.h" + +/** + * @file + * audio downmix medatata + */ + +/** + * @addtogroup lavu_audio + * @{ + */ + +/** + * @defgroup downmix_info Audio downmix metadata + * @{ + */ + +/** + * Possible downmix types. + */ +enum AVDownmixType { + AV_DOWNMIX_TYPE_UNKNOWN, /**< Not indicated. */ + AV_DOWNMIX_TYPE_LORO, /**< Lo/Ro 2-channel downmix (Stereo). */ + AV_DOWNMIX_TYPE_LTRT, /**< Lt/Rt 2-channel downmix, Dolby Surround compatible. */ + AV_DOWNMIX_TYPE_DPLII, /**< Lt/Rt 2-channel downmix, Dolby Pro Logic II compatible. */ + AV_DOWNMIX_TYPE_NB /**< Number of downmix types. Not part of ABI. */ +}; + +/** + * This structure describes optional metadata relevant to a downmix procedure. + * + * All fields are set by the decoder to the value indicated in the audio + * bitstream (if present), or to a "sane" default otherwise. + */ +typedef struct AVDownmixInfo { + /** + * Type of downmix preferred by the mastering engineer. + */ + enum AVDownmixType preferred_downmix_type; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during a regular downmix. + */ + double center_mix_level; + + /** + * Absolute scale factor representing the nominal level of the center + * channel during an Lt/Rt compatible downmix. + */ + double center_mix_level_ltrt; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during a regular downmix. + */ + double surround_mix_level; + + /** + * Absolute scale factor representing the nominal level of the surround + * channels during an Lt/Rt compatible downmix. 
+ */ + double surround_mix_level_ltrt; + + /** + * Absolute scale factor representing the level at which the LFE data is + * mixed into L/R channels during downmixing. + */ + double lfe_mix_level; +} AVDownmixInfo; + +/** + * Get a frame's AV_FRAME_DATA_DOWNMIX_INFO side data for editing. + * + * If the side data is absent, it is created and added to the frame. + * + * @param frame the frame for which the side data is to be obtained or created + * + * @return the AVDownmixInfo structure to be edited by the caller, or NULL if + * the structure cannot be allocated. + */ +AVDownmixInfo *av_downmix_info_update_side_data(AVFrame *frame); + +/** + * @} + */ + +/** + * @} + */ + +#endif /* AVUTIL_DOWNMIX_INFO_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/error.h b/third-party/FFmpeg-iOS/include/libavutil/error.h new file mode 100644 index 0000000000..71df4da353 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/error.h @@ -0,0 +1,126 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * error code definitions + */ + +#ifndef AVUTIL_ERROR_H +#define AVUTIL_ERROR_H + +#include +#include + +/** + * @addtogroup lavu_error + * + * @{ + */ + + +/* error handling */ +#if EDOM > 0 +#define AVERROR(e) (-(e)) ///< Returns a negative error code from a POSIX error code, to return from library functions. +#define AVUNERROR(e) (-(e)) ///< Returns a POSIX error code from a library function error return value. +#else +/* Some platforms have E* and errno already negated. */ +#define AVERROR(e) (e) +#define AVUNERROR(e) (e) +#endif + +#define FFERRTAG(a, b, c, d) (-(int)MKTAG(a, b, c, d)) + +#define AVERROR_BSF_NOT_FOUND FFERRTAG(0xF8,'B','S','F') ///< Bitstream filter not found +#define AVERROR_BUG FFERRTAG( 'B','U','G','!') ///< Internal bug, also see AVERROR_BUG2 +#define AVERROR_BUFFER_TOO_SMALL FFERRTAG( 'B','U','F','S') ///< Buffer too small +#define AVERROR_DECODER_NOT_FOUND FFERRTAG(0xF8,'D','E','C') ///< Decoder not found +#define AVERROR_DEMUXER_NOT_FOUND FFERRTAG(0xF8,'D','E','M') ///< Demuxer not found +#define AVERROR_ENCODER_NOT_FOUND FFERRTAG(0xF8,'E','N','C') ///< Encoder not found +#define AVERROR_EOF FFERRTAG( 'E','O','F',' ') ///< End of file +#define AVERROR_EXIT FFERRTAG( 'E','X','I','T') ///< Immediate exit was requested; the called function should not be restarted +#define AVERROR_EXTERNAL FFERRTAG( 'E','X','T',' ') ///< Generic error in an external library +#define AVERROR_FILTER_NOT_FOUND FFERRTAG(0xF8,'F','I','L') ///< Filter not found +#define AVERROR_INVALIDDATA FFERRTAG( 'I','N','D','A') ///< Invalid data found when processing input +#define AVERROR_MUXER_NOT_FOUND FFERRTAG(0xF8,'M','U','X') ///< Muxer not found +#define AVERROR_OPTION_NOT_FOUND FFERRTAG(0xF8,'O','P','T') ///< Option not found 
+#define AVERROR_PATCHWELCOME FFERRTAG( 'P','A','W','E') ///< Not yet implemented in FFmpeg, patches welcome +#define AVERROR_PROTOCOL_NOT_FOUND FFERRTAG(0xF8,'P','R','O') ///< Protocol not found + +#define AVERROR_STREAM_NOT_FOUND FFERRTAG(0xF8,'S','T','R') ///< Stream not found +/** + * This is semantically identical to AVERROR_BUG + * it has been introduced in Libav after our AVERROR_BUG and with a modified value. + */ +#define AVERROR_BUG2 FFERRTAG( 'B','U','G',' ') +#define AVERROR_UNKNOWN FFERRTAG( 'U','N','K','N') ///< Unknown error, typically from an external library +#define AVERROR_EXPERIMENTAL (-0x2bb2afa8) ///< Requested feature is flagged experimental. Set strict_std_compliance if you really want to use it. +#define AVERROR_INPUT_CHANGED (-0x636e6701) ///< Input changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_OUTPUT_CHANGED) +#define AVERROR_OUTPUT_CHANGED (-0x636e6702) ///< Output changed between calls. Reconfiguration is required. (can be OR-ed with AVERROR_INPUT_CHANGED) +/* HTTP & RTSP errors */ +#define AVERROR_HTTP_BAD_REQUEST FFERRTAG(0xF8,'4','0','0') +#define AVERROR_HTTP_UNAUTHORIZED FFERRTAG(0xF8,'4','0','1') +#define AVERROR_HTTP_FORBIDDEN FFERRTAG(0xF8,'4','0','3') +#define AVERROR_HTTP_NOT_FOUND FFERRTAG(0xF8,'4','0','4') +#define AVERROR_HTTP_OTHER_4XX FFERRTAG(0xF8,'4','X','X') +#define AVERROR_HTTP_SERVER_ERROR FFERRTAG(0xF8,'5','X','X') + +#define AV_ERROR_MAX_STRING_SIZE 64 + +/** + * Put a description of the AVERROR code errnum in errbuf. + * In case of failure the global variable errno is set to indicate the + * error. Even in case of failure av_strerror() will print a generic + * error message indicating the errnum provided to errbuf. 
+ * + * @param errnum error code to describe + * @param errbuf buffer to which description is written + * @param errbuf_size the size in bytes of errbuf + * @return 0 on success, a negative value if a description for errnum + * cannot be found + */ +int av_strerror(int errnum, char *errbuf, size_t errbuf_size); + +/** + * Fill the provided buffer with a string containing an error string + * corresponding to the AVERROR code errnum. + * + * @param errbuf a buffer + * @param errbuf_size size in bytes of errbuf + * @param errnum error code to describe + * @return the buffer in input, filled with the error description + * @see av_strerror() + */ +static inline char *av_make_error_string(char *errbuf, size_t errbuf_size, int errnum) +{ + av_strerror(errnum, errbuf, errbuf_size); + return errbuf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_err2str(errnum) \ + av_make_error_string((char[AV_ERROR_MAX_STRING_SIZE]){0}, AV_ERROR_MAX_STRING_SIZE, errnum) + +/** + * @} + */ + +#endif /* AVUTIL_ERROR_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/eval.h b/third-party/FFmpeg-iOS/include/libavutil/eval.h new file mode 100644 index 0000000000..dacd22b96e --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/eval.h @@ -0,0 +1,113 @@ +/* + * Copyright (c) 2002 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * simple arithmetic expression evaluator + */ + +#ifndef AVUTIL_EVAL_H +#define AVUTIL_EVAL_H + +#include "avutil.h" + +typedef struct AVExpr AVExpr; + +/** + * Parse and evaluate an expression. + * Note, this is significantly slower than av_expr_eval(). + * + * @param res a pointer to a double where is put the result value of + * the expression, or NAN in case of error + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param const_values a zero terminated array of values for the identifiers from const_names + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse_and_eval(double *res, const char *s, + const char * const *const_names, const double *const_values, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + void *opaque, int log_offset, void *log_ctx); + +/** + * Parse an expression. 
+ * + * @param expr a pointer where is put an AVExpr containing the parsed + * value in case of successful parsing, or NULL otherwise. + * The pointed to AVExpr must be freed with av_expr_free() by the user + * when it is not needed anymore. + * @param s expression as a zero terminated string, for example "1+2^3+5*5+sin(2/3)" + * @param const_names NULL terminated array of zero terminated strings of constant identifiers, for example {"PI", "E", 0} + * @param func1_names NULL terminated array of zero terminated strings of funcs1 identifiers + * @param funcs1 NULL terminated array of function pointers for functions which take 1 argument + * @param func2_names NULL terminated array of zero terminated strings of funcs2 identifiers + * @param funcs2 NULL terminated array of function pointers for functions which take 2 arguments + * @param log_ctx parent logging context + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_expr_parse(AVExpr **expr, const char *s, + const char * const *const_names, + const char * const *func1_names, double (* const *funcs1)(void *, double), + const char * const *func2_names, double (* const *funcs2)(void *, double, double), + int log_offset, void *log_ctx); + +/** + * Evaluate a previously parsed expression. + * + * @param const_values a zero terminated array of values for the identifiers from av_expr_parse() const_names + * @param opaque a pointer which will be passed to all functions from funcs1 and funcs2 + * @return the value of the expression + */ +double av_expr_eval(AVExpr *e, const double *const_values, void *opaque); + +/** + * Free a parsed expression previously created with av_expr_parse(). + */ +void av_expr_free(AVExpr *e); + +/** + * Parse the string in numstr and return its value as a double. 
If + * the string is empty, contains only whitespaces, or does not contain + * an initial substring that has the expected syntax for a + * floating-point number, no conversion is performed. In this case, + * returns a value of zero and the value returned in tail is the value + * of numstr. + * + * @param numstr a string representing a number, may contain one of + * the International System number postfixes, for example 'K', 'M', + * 'G'. If 'i' is appended after the postfix, powers of 2 are used + * instead of powers of 10. The 'B' postfix multiplies the value by + * 8, and can be appended after another postfix or used alone. This + * allows using for example 'KB', 'MiB', 'G' and 'B' as postfix. + * @param tail if non-NULL puts here the pointer to the char next + * after the last parsed character + */ +double av_strtod(const char *numstr, char **tail); + +#endif /* AVUTIL_EVAL_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/ffversion.h b/third-party/FFmpeg-iOS/include/libavutil/ffversion.h new file mode 100644 index 0000000000..1c6356a94f --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/ffversion.h @@ -0,0 +1,5 @@ +/* Automatically generated by version.sh, do not manually edit! */ +#ifndef AVUTIL_FFVERSION_H +#define AVUTIL_FFVERSION_H +#define FFMPEG_VERSION "3.1.1" +#endif /* AVUTIL_FFVERSION_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/fifo.h b/third-party/FFmpeg-iOS/include/libavutil/fifo.h new file mode 100644 index 0000000000..dc7bc6f0dd --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/fifo.h @@ -0,0 +1,179 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * a very simple circular buffer FIFO implementation + */ + +#ifndef AVUTIL_FIFO_H +#define AVUTIL_FIFO_H + +#include +#include "avutil.h" +#include "attributes.h" + +typedef struct AVFifoBuffer { + uint8_t *buffer; + uint8_t *rptr, *wptr, *end; + uint32_t rndx, wndx; +} AVFifoBuffer; + +/** + * Initialize an AVFifoBuffer. + * @param size of FIFO + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc(unsigned int size); + +/** + * Initialize an AVFifoBuffer. + * @param nmemb number of elements + * @param size size of the single element + * @return AVFifoBuffer or NULL in case of memory allocation failure + */ +AVFifoBuffer *av_fifo_alloc_array(size_t nmemb, size_t size); + +/** + * Free an AVFifoBuffer. + * @param f AVFifoBuffer to free + */ +void av_fifo_free(AVFifoBuffer *f); + +/** + * Free an AVFifoBuffer and reset pointer to NULL. + * @param f AVFifoBuffer to free + */ +void av_fifo_freep(AVFifoBuffer **f); + +/** + * Reset the AVFifoBuffer to the state right after av_fifo_alloc, in particular it is emptied. + * @param f AVFifoBuffer to reset + */ +void av_fifo_reset(AVFifoBuffer *f); + +/** + * Return the amount of data in bytes in the AVFifoBuffer, that is the + * amount of data you can read from it. + * @param f AVFifoBuffer to read from + * @return size + */ +int av_fifo_size(const AVFifoBuffer *f); + +/** + * Return the amount of space in bytes in the AVFifoBuffer, that is the + * amount of data you can write into it. 
+ * @param f AVFifoBuffer to write into + * @return size + */ +int av_fifo_space(const AVFifoBuffer *f); + +/** + * Feed data at specific position from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param offset offset from current read position + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_peek_at(AVFifoBuffer *f, void *dest, int offset, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * Similar as av_fifo_gereric_read but without discarding data. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_peek(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from an AVFifoBuffer to a user-supplied callback. + * @param f AVFifoBuffer to read from + * @param buf_size number of bytes to read + * @param func generic read function + * @param dest data destination + */ +int av_fifo_generic_read(AVFifoBuffer *f, void *dest, int buf_size, void (*func)(void*, void*, int)); + +/** + * Feed data from a user-supplied callback to an AVFifoBuffer. + * @param f AVFifoBuffer to write to + * @param src data source; non-const since it may be used as a + * modifiable context by the function defined in func + * @param size number of bytes to write + * @param func generic write function; the first parameter is src, + * the second is dest_buf, the third is dest_buf_size. + * func must return the number of bytes written to dest_buf, or <= 0 to + * indicate no more data available to write. + * If func is NULL, src is interpreted as a simple byte array for source data. 
+ * @return the number of bytes written to the FIFO + */ +int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int)); + +/** + * Resize an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * + * @param f AVFifoBuffer to resize + * @param size new AVFifoBuffer size in bytes + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_realloc2(AVFifoBuffer *f, unsigned int size); + +/** + * Enlarge an AVFifoBuffer. + * In case of reallocation failure, the old FIFO is kept unchanged. + * The new fifo size may be larger than the requested size. + * + * @param f AVFifoBuffer to resize + * @param additional_space the amount of space in bytes to allocate in addition to av_fifo_size() + * @return <0 for failure, >=0 otherwise + */ +int av_fifo_grow(AVFifoBuffer *f, unsigned int additional_space); + +/** + * Read and discard the specified amount of data from an AVFifoBuffer. + * @param f AVFifoBuffer to read from + * @param size amount of data to read in bytes + */ +void av_fifo_drain(AVFifoBuffer *f, int size); + +/** + * Return a pointer to the data stored in a FIFO buffer at a certain offset. + * The FIFO buffer is not modified. + * + * @param f AVFifoBuffer to peek at, f must be non-NULL + * @param offs an offset in bytes, its absolute value must be less + * than the used buffer size or the returned pointer will + * point outside to the buffer data. + * The used buffer size can be checked with av_fifo_size(). 
+ */ +static inline uint8_t *av_fifo_peek2(const AVFifoBuffer *f, int offs) +{ + uint8_t *ptr = f->rptr + offs; + if (ptr >= f->end) + ptr = f->buffer + (ptr - f->end); + else if (ptr < f->buffer) + ptr = f->end - (f->buffer - ptr); + return ptr; +} + +#endif /* AVUTIL_FIFO_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/file.h b/third-party/FFmpeg-iOS/include/libavutil/file.h new file mode 100644 index 0000000000..8666c7b1d5 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/file.h @@ -0,0 +1,69 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_FILE_H +#define AVUTIL_FILE_H + +#include + +#include "avutil.h" + +/** + * @file + * Misc file utilities. + */ + +/** + * Read the file with name filename, and put its content in a newly + * allocated buffer or map it with mmap() when available. + * In case of success set *bufptr to the read or mmapped buffer, and + * *size to the size in bytes of the buffer in *bufptr. + * The returned buffer must be released with av_file_unmap(). 
+ * + * @param log_offset loglevel offset used for logging + * @param log_ctx context used for logging + * @return a non negative number in case of success, a negative value + * corresponding to an AVERROR error code in case of failure + */ +av_warn_unused_result +int av_file_map(const char *filename, uint8_t **bufptr, size_t *size, + int log_offset, void *log_ctx); + +/** + * Unmap or free the buffer bufptr created by av_file_map(). + * + * @param size size in bytes of bufptr, must be the same as returned + * by av_file_map() + */ +void av_file_unmap(uint8_t *bufptr, size_t size); + +/** + * Wrapper to work around the lack of mkstemp() on mingw. + * Also, tries to create file in /tmp first, if possible. + * *prefix can be a character constant; *filename will be allocated internally. + * @return file descriptor of opened file (or negative value corresponding to an + * AVERROR code on error) + * and opened file name in **filename. + * @note On very old libcs it is necessary to set a secure umask before + * calling this, av_tempfile() can't call umask itself as it is used in + * libraries and could interfere with the calling application. + * @deprecated as fd numbers cannot be passed saftely between libs on some platforms + */ +int av_tempfile(const char *prefix, char **filename, int log_offset, void *log_ctx); + +#endif /* AVUTIL_FILE_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/frame.h b/third-party/FFmpeg-iOS/include/libavutil/frame.h new file mode 100644 index 0000000000..2b5c3320c3 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/frame.h @@ -0,0 +1,746 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu_frame + * reference-counted frame API + */ + +#ifndef AVUTIL_FRAME_H +#define AVUTIL_FRAME_H + +#include + +#include "avutil.h" +#include "buffer.h" +#include "dict.h" +#include "rational.h" +#include "samplefmt.h" +#include "pixfmt.h" +#include "version.h" + + +/** + * @defgroup lavu_frame AVFrame + * @ingroup lavu_data + * + * @{ + * AVFrame is an abstraction for reference-counted raw multimedia data. + */ + +enum AVFrameSideDataType { + /** + * The data is the AVPanScan struct defined in libavcodec. + */ + AV_FRAME_DATA_PANSCAN, + /** + * ATSC A53 Part 4 Closed Captions. + * A53 CC bitstream is stored as uint8_t in AVFrameSideData.data. + * The number of bytes of CC data is AVFrameSideData.size. + */ + AV_FRAME_DATA_A53_CC, + /** + * Stereoscopic 3d metadata. + * The data is the AVStereo3D struct defined in libavutil/stereo3d.h. + */ + AV_FRAME_DATA_STEREO3D, + /** + * The data is the AVMatrixEncoding enum defined in libavutil/channel_layout.h. + */ + AV_FRAME_DATA_MATRIXENCODING, + /** + * Metadata relevant to a downmix procedure. + * The data is the AVDownmixInfo struct defined in libavutil/downmix_info.h. + */ + AV_FRAME_DATA_DOWNMIX_INFO, + /** + * ReplayGain information in the form of the AVReplayGain struct. + */ + AV_FRAME_DATA_REPLAYGAIN, + /** + * This side data contains a 3x3 transformation matrix describing an affine + * transformation that needs to be applied to the frame for correct + * presentation. 
+ * + * See libavutil/display.h for a detailed description of the data. + */ + AV_FRAME_DATA_DISPLAYMATRIX, + /** + * Active Format Description data consisting of a single byte as specified + * in ETSI TS 101 154 using AVActiveFormatDescription enum. + */ + AV_FRAME_DATA_AFD, + /** + * Motion vectors exported by some codecs (on demand through the export_mvs + * flag set in the libavcodec AVCodecContext flags2 option). + * The data is the AVMotionVector struct defined in + * libavutil/motion_vector.h. + */ + AV_FRAME_DATA_MOTION_VECTORS, + /** + * Recommmends skipping the specified number of samples. This is exported + * only if the "skip_manual" AVOption is set in libavcodec. + * This has the same format as AV_PKT_DATA_SKIP_SAMPLES. + * @code + * u32le number of samples to skip from start of this packet + * u32le number of samples to skip from end of this packet + * u8 reason for start skip + * u8 reason for end skip (0=padding silence, 1=convergence) + * @endcode + */ + AV_FRAME_DATA_SKIP_SAMPLES, + /** + * This side data must be associated with an audio frame and corresponds to + * enum AVAudioServiceType defined in avcodec.h. + */ + AV_FRAME_DATA_AUDIO_SERVICE_TYPE, + /** + * Mastering display metadata associated with a video frame. The payload is + * an AVMasteringDisplayMetadata type and contains information about the + * mastering display color volume. + */ + AV_FRAME_DATA_MASTERING_DISPLAY_METADATA, + /** + * The GOP timecode in 25 bit timecode format. Data format is 64-bit integer. + * This is set on the first frame of a GOP that has a temporal reference of 0. + */ + AV_FRAME_DATA_GOP_TIMECODE +}; + +enum AVActiveFormatDescription { + AV_AFD_SAME = 8, + AV_AFD_4_3 = 9, + AV_AFD_16_9 = 10, + AV_AFD_14_9 = 11, + AV_AFD_4_3_SP_14_9 = 13, + AV_AFD_16_9_SP_14_9 = 14, + AV_AFD_SP_4_3 = 15, +}; + + +/** + * Structure to hold side data for an AVFrame. 
+ * + * sizeof(AVFrameSideData) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + */ +typedef struct AVFrameSideData { + enum AVFrameSideDataType type; + uint8_t *data; + int size; + AVDictionary *metadata; + AVBufferRef *buf; +} AVFrameSideData; + +/** + * This structure describes decoded (raw) audio or video data. + * + * AVFrame must be allocated using av_frame_alloc(). Note that this only + * allocates the AVFrame itself, the buffers for the data must be managed + * through other means (see below). + * AVFrame must be freed with av_frame_free(). + * + * AVFrame is typically allocated once and then reused multiple times to hold + * different data (e.g. a single AVFrame to hold frames received from a + * decoder). In such a case, av_frame_unref() will free any references held by + * the frame and reset it to its original clean state before it + * is reused again. + * + * The data described by an AVFrame is usually reference counted through the + * AVBuffer API. The underlying buffer references are stored in AVFrame.buf / + * AVFrame.extended_buf. An AVFrame is considered to be reference counted if at + * least one reference is set, i.e. if AVFrame.buf[0] != NULL. In such a case, + * every single data plane must be contained in one of the buffers in + * AVFrame.buf or AVFrame.extended_buf. + * There may be a single buffer for all the data, or one separate buffer for + * each plane, or anything in between. + * + * sizeof(AVFrame) is not a part of the public ABI, so new fields may be added + * to the end with a minor bump. + * Similarly fields that are marked as to be only accessed by + * av_opt_ptr() can be reordered. This allows 2 forks to add fields + * without breaking compatibility with each other. + * + * Fields can be accessed through AVOptions, the name string used, matches the + * C structure field name for fields accessable through AVOptions. 
The AVClass + * for AVFrame can be obtained from avcodec_get_frame_class() + */ +typedef struct AVFrame { +#define AV_NUM_DATA_POINTERS 8 + /** + * pointer to the picture/channel planes. + * This might be different from the first allocated byte + * + * Some decoders access areas outside 0,0 - width,height, please + * see avcodec_align_dimensions2(). Some filters and swscale can read + * up to 16 bytes beyond the planes, if these filters are to be used, + * then 16 extra bytes must be allocated. + * + * NOTE: Except for hwaccel formats, pointers not needed by the format + * MUST be set to NULL. + */ + uint8_t *data[AV_NUM_DATA_POINTERS]; + + /** + * For video, size in bytes of each picture line. + * For audio, size in bytes of each plane. + * + * For audio, only linesize[0] may be set. For planar audio, each channel + * plane must be the same size. + * + * For video the linesizes should be multiples of the CPUs alignment + * preference, this is 16 or 32 for modern desktop CPUs. + * Some code requires such alignment other code can be slower without + * correct alignment, for yet other it makes no difference. + * + * @note The linesize may be larger than the size of usable data -- there + * may be extra padding present for performance reasons. + */ + int linesize[AV_NUM_DATA_POINTERS]; + + /** + * pointers to the data planes/channels. + * + * For video, this should simply point to data[]. + * + * For planar audio, each channel has a separate data pointer, and + * linesize[0] contains the size of each channel buffer. + * For packed audio, there is just one data pointer, and linesize[0] + * contains the total size of the buffer for all channels. + * + * Note: Both data and extended_data should always be set in a valid frame, + * but for planar audio with more channels that can fit in data, + * extended_data must be used in order to access all channels. 
+ */ + uint8_t **extended_data; + + /** + * width and height of the video frame + */ + int width, height; + + /** + * number of audio samples (per channel) described by this frame + */ + int nb_samples; + + /** + * format of the frame, -1 if unknown or unset + * Values correspond to enum AVPixelFormat for video frames, + * enum AVSampleFormat for audio) + */ + int format; + + /** + * 1 -> keyframe, 0-> not + */ + int key_frame; + + /** + * Picture type of the frame. + */ + enum AVPictureType pict_type; + + /** + * Sample aspect ratio for the video frame, 0/1 if unknown/unspecified. + */ + AVRational sample_aspect_ratio; + + /** + * Presentation timestamp in time_base units (time when frame should be shown to user). + */ + int64_t pts; + + /** + * PTS copied from the AVPacket that was decoded to produce this frame. + */ + int64_t pkt_pts; + + /** + * DTS copied from the AVPacket that triggered returning this frame. (if frame threading isn't used) + * This is also the Presentation time of this AVFrame calculated from + * only AVPacket.dts values without pts values. + */ + int64_t pkt_dts; + + /** + * picture number in bitstream order + */ + int coded_picture_number; + /** + * picture number in display order + */ + int display_picture_number; + + /** + * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) + */ + int quality; + + /** + * for some private data of the user + */ + void *opaque; + +#if FF_API_ERROR_FRAME + /** + * @deprecated unused + */ + attribute_deprecated + uint64_t error[AV_NUM_DATA_POINTERS]; +#endif + + /** + * When decoding, this signals how much the picture must be delayed. + * extra_delay = repeat_pict / (2*fps) + */ + int repeat_pict; + + /** + * The content of the picture is interlaced. + */ + int interlaced_frame; + + /** + * If the content is interlaced, is top field displayed first. + */ + int top_field_first; + + /** + * Tell user application that palette has changed from previous frame. 
+ */ + int palette_has_changed; + + /** + * reordered opaque 64 bits (generally an integer or a double precision float + * PTS but can be anything). + * The user sets AVCodecContext.reordered_opaque to represent the input at + * that time, + * the decoder reorders values as needed and sets AVFrame.reordered_opaque + * to exactly one of the values provided by the user through AVCodecContext.reordered_opaque + * @deprecated in favor of pkt_pts + */ + int64_t reordered_opaque; + + /** + * Sample rate of the audio data. + */ + int sample_rate; + + /** + * Channel layout of the audio data. + */ + uint64_t channel_layout; + + /** + * AVBuffer references backing the data for this frame. If all elements of + * this array are NULL, then this frame is not reference counted. This array + * must be filled contiguously -- if buf[i] is non-NULL then buf[j] must + * also be non-NULL for all j < i. + * + * There may be at most one AVBuffer per data plane, so for video this array + * always contains all the references. For planar audio with more than + * AV_NUM_DATA_POINTERS channels, there may be more buffers than can fit in + * this array. Then the extra AVBufferRef pointers are stored in the + * extended_buf array. + */ + AVBufferRef *buf[AV_NUM_DATA_POINTERS]; + + /** + * For planar audio which requires more than AV_NUM_DATA_POINTERS + * AVBufferRef pointers, this array will hold all the references which + * cannot fit into AVFrame.buf. + * + * Note that this is different from AVFrame.extended_data, which always + * contains all the pointers. This array only contains the extra pointers, + * which cannot fit into AVFrame.buf. + * + * This array is always allocated using av_malloc() by whoever constructs + * the frame. It is freed in av_frame_unref(). + */ + AVBufferRef **extended_buf; + /** + * Number of elements in extended_buf. 
+ */ + int nb_extended_buf; + + AVFrameSideData **side_data; + int nb_side_data; + +/** + * @defgroup lavu_frame_flags AV_FRAME_FLAGS + * Flags describing additional frame properties. + * + * @{ + */ + +/** + * The frame data may be corrupted, e.g. due to decoding errors. + */ +#define AV_FRAME_FLAG_CORRUPT (1 << 0) +/** + * @} + */ + + /** + * Frame flags, a combination of @ref lavu_frame_flags + */ + int flags; + + /** + * MPEG vs JPEG YUV range. + * It must be accessed using av_frame_get_color_range() and + * av_frame_set_color_range(). + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorRange color_range; + + enum AVColorPrimaries color_primaries; + + enum AVColorTransferCharacteristic color_trc; + + /** + * YUV colorspace type. + * It must be accessed using av_frame_get_colorspace() and + * av_frame_set_colorspace(). + * - encoding: Set by user + * - decoding: Set by libavcodec + */ + enum AVColorSpace colorspace; + + enum AVChromaLocation chroma_location; + + /** + * frame timestamp estimated using various heuristics, in stream time base + * Code outside libavutil should access this field using: + * av_frame_get_best_effort_timestamp(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int64_t best_effort_timestamp; + + /** + * reordered pos from the last AVPacket that has been input into the decoder + * Code outside libavutil should access this field using: + * av_frame_get_pkt_pos(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_pos; + + /** + * duration of the corresponding packet, expressed in + * AVStream->time_base units, 0 if unknown. + * Code outside libavutil should access this field using: + * av_frame_get_pkt_duration(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int64_t pkt_duration; + + /** + * metadata. + * Code outside libavutil should access this field using: + * av_frame_get_metadata(frame) + * - encoding: Set by user. 
+ * - decoding: Set by libavcodec. + */ + AVDictionary *metadata; + + /** + * decode error flags of the frame, set to a combination of + * FF_DECODE_ERROR_xxx flags if the decoder produced a frame, but there + * were errors during the decoding. + * Code outside libavutil should access this field using: + * av_frame_get_decode_error_flags(frame) + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int decode_error_flags; +#define FF_DECODE_ERROR_INVALID_BITSTREAM 1 +#define FF_DECODE_ERROR_MISSING_REFERENCE 2 + + /** + * number of audio channels, only used for audio. + * Code outside libavutil should access this field using: + * av_frame_get_channels(frame) + * - encoding: unused + * - decoding: Read by user. + */ + int channels; + + /** + * size of the corresponding packet containing the compressed + * frame. It must be accessed using av_frame_get_pkt_size() and + * av_frame_set_pkt_size(). + * It is set to a negative value if unknown. + * - encoding: unused + * - decoding: set by libavcodec, read by user. + */ + int pkt_size; + +#if FF_API_FRAME_QP + /** + * QP table + * Not to be accessed directly from outside libavutil + */ + attribute_deprecated + int8_t *qscale_table; + /** + * QP store stride + * Not to be accessed directly from outside libavutil + */ + attribute_deprecated + int qstride; + + attribute_deprecated + int qscale_type; + + /** + * Not to be accessed directly from outside libavutil + */ + AVBufferRef *qp_table_buf; +#endif + /** + * For hwaccel-format frames, this should be a reference to the + * AVHWFramesContext describing the frame. + */ + AVBufferRef *hw_frames_ctx; +} AVFrame; + +/** + * Accessors for some AVFrame fields. + * The position of these field in the structure is not part of the ABI, + * they should not be accessed directly outside libavutil. 
+ */ +int64_t av_frame_get_best_effort_timestamp(const AVFrame *frame); +void av_frame_set_best_effort_timestamp(AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_duration (const AVFrame *frame); +void av_frame_set_pkt_duration (AVFrame *frame, int64_t val); +int64_t av_frame_get_pkt_pos (const AVFrame *frame); +void av_frame_set_pkt_pos (AVFrame *frame, int64_t val); +int64_t av_frame_get_channel_layout (const AVFrame *frame); +void av_frame_set_channel_layout (AVFrame *frame, int64_t val); +int av_frame_get_channels (const AVFrame *frame); +void av_frame_set_channels (AVFrame *frame, int val); +int av_frame_get_sample_rate (const AVFrame *frame); +void av_frame_set_sample_rate (AVFrame *frame, int val); +AVDictionary *av_frame_get_metadata (const AVFrame *frame); +void av_frame_set_metadata (AVFrame *frame, AVDictionary *val); +int av_frame_get_decode_error_flags (const AVFrame *frame); +void av_frame_set_decode_error_flags (AVFrame *frame, int val); +int av_frame_get_pkt_size(const AVFrame *frame); +void av_frame_set_pkt_size(AVFrame *frame, int val); +AVDictionary **avpriv_frame_get_metadatap(AVFrame *frame); +#if FF_API_FRAME_QP +int8_t *av_frame_get_qp_table(AVFrame *f, int *stride, int *type); +int av_frame_set_qp_table(AVFrame *f, AVBufferRef *buf, int stride, int type); +#endif +enum AVColorSpace av_frame_get_colorspace(const AVFrame *frame); +void av_frame_set_colorspace(AVFrame *frame, enum AVColorSpace val); +enum AVColorRange av_frame_get_color_range(const AVFrame *frame); +void av_frame_set_color_range(AVFrame *frame, enum AVColorRange val); + +/** + * Get the name of a colorspace. + * @return a static string identifying the colorspace; can be NULL. + */ +const char *av_get_colorspace_name(enum AVColorSpace val); + +/** + * Allocate an AVFrame and set its fields to default values. The resulting + * struct must be freed using av_frame_free(). + * + * @return An AVFrame filled with default values or NULL on failure. 
+ * + * @note this only allocates the AVFrame itself, not the data buffers. Those + * must be allocated through other means, e.g. with av_frame_get_buffer() or + * manually. + */ +AVFrame *av_frame_alloc(void); + +/** + * Free the frame and any dynamically allocated objects in it, + * e.g. extended_data. If the frame is reference counted, it will be + * unreferenced first. + * + * @param frame frame to be freed. The pointer will be set to NULL. + */ +void av_frame_free(AVFrame **frame); + +/** + * Set up a new reference to the data described by the source frame. + * + * Copy frame properties from src to dst and create a new reference for each + * AVBufferRef from src. + * + * If src is not reference counted, new buffers are allocated and the data is + * copied. + * + * @warning: dst MUST have been either unreferenced with av_frame_unref(dst), + * or newly allocated with av_frame_alloc() before calling this + * function, or undefined behavior will occur. + * + * @return 0 on success, a negative AVERROR on error + */ +int av_frame_ref(AVFrame *dst, const AVFrame *src); + +/** + * Create a new frame that references the same data as src. + * + * This is a shortcut for av_frame_alloc()+av_frame_ref(). + * + * @return newly created AVFrame on success, NULL on error. + */ +AVFrame *av_frame_clone(const AVFrame *src); + +/** + * Unreference all the buffers referenced by frame and reset the frame fields. + */ +void av_frame_unref(AVFrame *frame); + +/** + * Move everything contained in src to dst and reset src. + * + * @warning: dst is not unreferenced, but directly overwritten without reading + * or deallocating its contents. Call av_frame_unref(dst) manually + * before calling this function to ensure that no memory is leaked. + */ +void av_frame_move_ref(AVFrame *dst, AVFrame *src); + +/** + * Allocate new buffer(s) for audio or video data. 
+ * + * The following fields must be set on frame before calling this function: + * - format (pixel format for video, sample format for audio) + * - width and height for video + * - nb_samples and channel_layout for audio + * + * This function will fill AVFrame.data and AVFrame.buf arrays and, if + * necessary, allocate and fill AVFrame.extended_data and AVFrame.extended_buf. + * For planar formats, one buffer will be allocated for each plane. + * + * @warning: if frame already has been allocated, calling this function will + * leak memory. In addition, undefined behavior can occur in certain + * cases. + * + * @param frame frame in which to store the new buffers. + * @param align required buffer size alignment + * + * @return 0 on success, a negative AVERROR on error. + */ +int av_frame_get_buffer(AVFrame *frame, int align); + +/** + * Check if the frame data is writable. + * + * @return A positive value if the frame data is writable (which is true if and + * only if each of the underlying buffers has only one reference, namely the one + * stored in this frame). Return 0 otherwise. + * + * If 1 is returned the answer is valid until av_buffer_ref() is called on any + * of the underlying AVBufferRefs (e.g. through av_frame_ref() or directly). + * + * @see av_frame_make_writable(), av_buffer_is_writable() + */ +int av_frame_is_writable(AVFrame *frame); + +/** + * Ensure that the frame data is writable, avoiding data copy if possible. + * + * Do nothing if the frame is writable, allocate new buffers and copy the data + * if it is not. + * + * @return 0 on success, a negative AVERROR on error. + * + * @see av_frame_is_writable(), av_buffer_is_writable(), + * av_buffer_make_writable() + */ +int av_frame_make_writable(AVFrame *frame); + +/** + * Copy the frame data from src to dst. + * + * This function does not allocate anything, dst must be already initialized and + * allocated with the same parameters as src. + * + * This function only copies the frame data (i.e. 
the contents of the data / + * extended data arrays), not any other properties. + * + * @return >= 0 on success, a negative AVERROR on error. + */ +int av_frame_copy(AVFrame *dst, const AVFrame *src); + +/** + * Copy only "metadata" fields from src to dst. + * + * Metadata for the purpose of this function are those fields that do not affect + * the data layout in the buffers. E.g. pts, sample rate (for audio) or sample + * aspect ratio (for video), but not width/height or channel layout. + * Side data is also copied. + */ +int av_frame_copy_props(AVFrame *dst, const AVFrame *src); + +/** + * Get the buffer reference a given data plane is stored in. + * + * @param plane index of the data plane of interest in frame->extended_data. + * + * @return the buffer reference that contains the plane or NULL if the input + * frame is not valid. + */ +AVBufferRef *av_frame_get_plane_buffer(AVFrame *frame, int plane); + +/** + * Add a new side data to a frame. + * + * @param frame a frame to which the side data should be added + * @param type type of the added side data + * @param size size of the side data + * + * @return newly added side data on success, NULL on error + */ +AVFrameSideData *av_frame_new_side_data(AVFrame *frame, + enum AVFrameSideDataType type, + int size); + +/** + * @return a pointer to the side data of a given type on success, NULL if there + * is no side data with such type in this frame. + */ +AVFrameSideData *av_frame_get_side_data(const AVFrame *frame, + enum AVFrameSideDataType type); + +/** + * If side data of the supplied type exists in the frame, free it and remove it + * from the frame. 
+ */ +void av_frame_remove_side_data(AVFrame *frame, enum AVFrameSideDataType type); + +/** + * @return a string identifying the side data type + */ +const char *av_frame_side_data_name(enum AVFrameSideDataType type); + +/** + * @} + */ + +#endif /* AVUTIL_FRAME_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hash.h b/third-party/FFmpeg-iOS/include/libavutil/hash.h new file mode 100644 index 0000000000..d4bcbf8cc8 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hash.h @@ -0,0 +1,112 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HASH_H +#define AVUTIL_HASH_H + +#include + +struct AVHashContext; + +/** + * Allocate a hash context for the algorithm specified by name. + * + * @return >= 0 for success, a negative error code for failure + * @note The context is not initialized, you must call av_hash_init(). + */ +int av_hash_alloc(struct AVHashContext **ctx, const char *name); + +/** + * Get the names of available hash algorithms. + * + * This function can be used to enumerate the algorithms. 
+ * + * @param i index of the hash algorithm, starting from 0 + * @return a pointer to a static string or NULL if i is out of range + */ +const char *av_hash_names(int i); + +/** + * Get the name of the algorithm corresponding to the given hash context. + */ +const char *av_hash_get_name(const struct AVHashContext *ctx); + +/** + * Maximum value that av_hash_get_size will currently return. + * + * You can use this if you absolutely want or need to use static allocation + * and are fine with not supporting hashes newly added to libavutil without + * recompilation. + * Note that you still need to check against av_hash_get_size, adding new hashes + * with larger sizes will not be considered an ABI change and should not cause + * your code to overflow a buffer. + */ +#define AV_HASH_MAX_SIZE 64 + +/** + * Get the size of the resulting hash value in bytes. + * + * The pointer passed to av_hash_final have space for at least this many bytes. + */ +int av_hash_get_size(const struct AVHashContext *ctx); + +/** + * Initialize or reset a hash context. + */ +void av_hash_init(struct AVHashContext *ctx); + +/** + * Update a hash context with additional data. + */ +void av_hash_update(struct AVHashContext *ctx, const uint8_t *src, int len); + +/** + * Finalize a hash context and compute the actual hash value. + */ +void av_hash_final(struct AVHashContext *ctx, uint8_t *dst); + +/** + * Finalize a hash context and compute the actual hash value. + * If size is smaller than the hash size, the hash is truncated; + * if size is larger, the buffer is padded with 0. + */ +void av_hash_final_bin(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and compute the actual hash value as a hex string. + * The string is always 0-terminated. + * If size is smaller than 2 * hash_size + 1, the hex string is truncated. 
+ */ +void av_hash_final_hex(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Finalize a hash context and compute the actual hash value as a base64 string. + * The string is always 0-terminated. + * If size is smaller than AV_BASE64_SIZE(hash_size), the base64 string is + * truncated. + */ +void av_hash_final_b64(struct AVHashContext *ctx, uint8_t *dst, int size); + +/** + * Free hash context. + */ +void av_hash_freep(struct AVHashContext **ctx); + +#endif /* AVUTIL_HASH_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hmac.h b/third-party/FFmpeg-iOS/include/libavutil/hmac.h new file mode 100644 index 0000000000..576a0a4fb9 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hmac.h @@ -0,0 +1,100 @@ +/* + * Copyright (C) 2012 Martin Storsjo + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HMAC_H +#define AVUTIL_HMAC_H + +#include + +#include "version.h" +/** + * @defgroup lavu_hmac HMAC + * @ingroup lavu_crypto + * @{ + */ + +enum AVHMACType { + AV_HMAC_MD5, + AV_HMAC_SHA1, + AV_HMAC_SHA224, + AV_HMAC_SHA256, + AV_HMAC_SHA384 = 12, + AV_HMAC_SHA512, +}; + +typedef struct AVHMAC AVHMAC; + +/** + * Allocate an AVHMAC context. + * @param type The hash function used for the HMAC. 
+ */ +AVHMAC *av_hmac_alloc(enum AVHMACType type); + +/** + * Free an AVHMAC context. + * @param ctx The context to free, may be NULL + */ +void av_hmac_free(AVHMAC *ctx); + +/** + * Initialize an AVHMAC context with an authentication key. + * @param ctx The HMAC context + * @param key The authentication key + * @param keylen The length of the key, in bytes + */ +void av_hmac_init(AVHMAC *ctx, const uint8_t *key, unsigned int keylen); + +/** + * Hash data with the HMAC. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + */ +void av_hmac_update(AVHMAC *ctx, const uint8_t *data, unsigned int len); + +/** + * Finish hashing and output the HMAC digest. + * @param ctx The HMAC context + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_final(AVHMAC *ctx, uint8_t *out, unsigned int outlen); + +/** + * Hash an array of data with a key. + * @param ctx The HMAC context + * @param data The data to hash + * @param len The length of the data, in bytes + * @param key The authentication key + * @param keylen The length of the key, in bytes + * @param out The output buffer to write the digest into + * @param outlen The length of the out buffer, in bytes + * @return The number of bytes written to out, or a negative error code. + */ +int av_hmac_calc(AVHMAC *ctx, const uint8_t *data, unsigned int len, + const uint8_t *key, unsigned int keylen, + uint8_t *out, unsigned int outlen); + +/** + * @} + */ + +#endif /* AVUTIL_HMAC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h new file mode 100644 index 0000000000..4e9da0224d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext.h @@ -0,0 +1,428 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_H +#define AVUTIL_HWCONTEXT_H + +#include "buffer.h" +#include "frame.h" +#include "log.h" +#include "pixfmt.h" + +enum AVHWDeviceType { + AV_HWDEVICE_TYPE_VDPAU, + AV_HWDEVICE_TYPE_CUDA, + AV_HWDEVICE_TYPE_VAAPI, + AV_HWDEVICE_TYPE_DXVA2, +}; + +typedef struct AVHWDeviceInternal AVHWDeviceInternal; + +/** + * This struct aggregates all the (hardware/vendor-specific) "high-level" state, + * i.e. state that is not tied to a concrete processing configuration. + * E.g., in an API that supports hardware-accelerated encoding and decoding, + * this struct will (if possible) wrap the state that is common to both encoding + * and decoding and from which specific instances of encoders or decoders can be + * derived. + * + * This struct is reference-counted with the AVBuffer mechanism. The + * av_hwdevice_ctx_alloc() constructor yields a reference, whose data field + * points to the actual AVHWDeviceContext. Further objects derived from + * AVHWDeviceContext (such as AVHWFramesContext, describing a frame pool with + * specific properties) will hold an internal reference to it. 
After all the + * references are released, the AVHWDeviceContext itself will be freed, + * optionally invoking a user-specified callback for uninitializing the hardware + * state. + */ +typedef struct AVHWDeviceContext { + /** + * A class for logging. Set by av_hwdevice_ctx_alloc(). + */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. + */ + AVHWDeviceInternal *internal; + + /** + * This field identifies the underlying API used for hardware access. + * + * This field is set when this struct is allocated and never changed + * afterwards. + */ + enum AVHWDeviceType type; + + /** + * The format-specific data, allocated and freed by libavutil along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwcontext_*.h) and filled as described in the + * documentation before calling av_hwdevice_ctx_init(). + * + * After calling av_hwdevice_ctx_init() this struct should not be modified + * by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling av_hwdevice_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + * + * @note when other objects (e.g an AVHWFramesContext) are derived from this + * struct, this callback will be invoked after all such child objects + * are fully uninitialized and their respective destructors invoked. + */ + void (*free)(struct AVHWDeviceContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; +} AVHWDeviceContext; + +typedef struct AVHWFramesInternal AVHWFramesInternal; + +/** + * This struct describes a set or pool of "hardware" frames (i.e. those with + * data not located in normal system memory). 
All the frames in the pool are + * assumed to be allocated in the same way and interchangeable. + * + * This struct is reference-counted with the AVBuffer mechanism and tied to a + * given AVHWDeviceContext instance. The av_hwframe_ctx_alloc() constructor + * yields a reference, whose data field points to the actual AVHWFramesContext + * struct. + */ +typedef struct AVHWFramesContext { + /** + * A class for logging. + */ + const AVClass *av_class; + + /** + * Private data used internally by libavutil. Must not be accessed in any + * way by the caller. + */ + AVHWFramesInternal *internal; + + /** + * A reference to the parent AVHWDeviceContext. This reference is owned and + * managed by the enclosing AVHWFramesContext, but the caller may derive + * additional references from it. + */ + AVBufferRef *device_ref; + + /** + * The parent AVHWDeviceContext. This is simply a pointer to + * device_ref->data provided for convenience. + * + * Set by libavutil in av_hwframe_ctx_init(). + */ + AVHWDeviceContext *device_ctx; + + /** + * The format-specific data, allocated and freed automatically along with + * this context. + * + * Should be cast by the user to the format-specific context defined in the + * corresponding header (hwframe_*.h) and filled as described in the + * documentation before calling av_hwframe_ctx_init(). + * + * After any frames using this context are created, the contents of this + * struct should not be modified by the caller. + */ + void *hwctx; + + /** + * This field may be set by the caller before calling av_hwframe_ctx_init(). + * + * If non-NULL, this callback will be called when the last reference to + * this context is unreferenced, immediately before it is freed. + */ + void (*free)(struct AVHWFramesContext *ctx); + + /** + * Arbitrary user data, to be used e.g. by the free() callback. + */ + void *user_opaque; + + /** + * A pool from which the frames are allocated by av_hwframe_get_buffer(). 
+ * This field may be set by the caller before calling av_hwframe_ctx_init(). + * The buffers returned by calling av_buffer_pool_get() on this pool must + * have the properties described in the documentation in the corresponding hw + * type's header (hwcontext_*.h). The pool will be freed strictly before + * this struct's free() callback is invoked. + * + * This field may be NULL, then libavutil will attempt to allocate a pool + * internally. Note that certain device types enforce pools allocated at + * fixed size (frame count), which cannot be extended dynamically. In such a + * case, initial_pool_size must be set appropriately. + */ + AVBufferPool *pool; + + /** + * Initial size of the frame pool. If a device type does not support + * dynamically resizing the pool, then this is also the maximum pool size. + * + * May be set by the caller before calling av_hwframe_ctx_init(). Must be + * set if pool is NULL and the device type does not support dynamic pools. + */ + int initial_pool_size; + + /** + * The pixel format identifying the underlying HW surface type. + * + * Must be a hwaccel format, i.e. the corresponding descriptor must have the + * AV_PIX_FMT_FLAG_HWACCEL flag set. + * + * Must be set by the user before calling av_hwframe_ctx_init(). + */ + enum AVPixelFormat format; + + /** + * The pixel format identifying the actual data layout of the hardware + * frames. + * + * Must be set by the caller before calling av_hwframe_ctx_init(). + * + * @note when the underlying API does not provide the exact data layout, but + * only the colorspace/bit depth, this field should be set to the fully + * planar version of that format (e.g. for 8-bit 420 YUV it should be + * AV_PIX_FMT_YUV420P, not AV_PIX_FMT_NV12 or anything else). + */ + enum AVPixelFormat sw_format; + + /** + * The allocated dimensions of the frames in this pool. + * + * Must be set by the user before calling av_hwframe_ctx_init(). 
+ */ + int width, height; +} AVHWFramesContext; + +/** + * Allocate an AVHWDeviceContext for a given pixel format. + * + * @param format a hwaccel pixel format (AV_PIX_FMT_FLAG_HWACCEL must be set + * on the corresponding format descriptor) + * @return a reference to the newly created AVHWDeviceContext on success or NULL + * on failure. + */ +AVBufferRef *av_hwdevice_ctx_alloc(enum AVHWDeviceType type); + +/** + * Finalize the device context before use. This function must be called after + * the context is filled with all the required information and before it is + * used in any way. + * + * @param ref a reference to the AVHWDeviceContext + * @return 0 on success, a negative AVERROR code on failure + */ +int av_hwdevice_ctx_init(AVBufferRef *ref); + +/** + * Open a device of the specified type and create an AVHWDeviceContext for it. + * + * This is a convenience function intended to cover the simple cases. Callers + * who need to fine-tune device creation/management should open the device + * manually and then wrap it in an AVHWDeviceContext using + * av_hwdevice_ctx_alloc()/av_hwdevice_ctx_init(). + * + * The returned context is already initialized and ready for use, the caller + * should not call av_hwdevice_ctx_init() on it. The user_opaque/free fields of + * the created AVHWDeviceContext are set by this function and should not be + * touched by the caller. + * + * @param device_ctx On success, a reference to the newly-created device context + * will be written here. The reference is owned by the caller + * and must be released with av_buffer_unref() when no longer + * needed. On failure, NULL will be written to this pointer. + * @param type The type of the device to create. + * @param device A type-specific string identifying the device to open. + * @param opts A dictionary of additional (type-specific) options to use in + * opening the device. The dictionary remains owned by the caller. 
+ * @param flags currently unused + * + * @return 0 on success, a negative AVERROR code on failure. + */ +int av_hwdevice_ctx_create(AVBufferRef **device_ctx, enum AVHWDeviceType type, + const char *device, AVDictionary *opts, int flags); + +/** + * Allocate an AVHWFramesContext tied to a given device context. + * + * @param device_ctx a reference to a AVHWDeviceContext. This function will make + * a new reference for internal use, the one passed to the + * function remains owned by the caller. + * @return a reference to the newly created AVHWFramesContext on success or NULL + * on failure. + */ +AVBufferRef *av_hwframe_ctx_alloc(AVBufferRef *device_ctx); + +/** + * Finalize the context before use. This function must be called after the + * context is filled with all the required information and before it is attached + * to any frames. + * + * @param ref a reference to the AVHWFramesContext + * @return 0 on success, a negative AVERROR code on failure + */ +int av_hwframe_ctx_init(AVBufferRef *ref); + +/** + * Allocate a new frame attached to the given AVHWFramesContext. + * + * @param hwframe_ctx a reference to an AVHWFramesContext + * @param frame an empty (freshly allocated or unreffed) frame to be filled with + * newly allocated buffers. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure + */ +int av_hwframe_get_buffer(AVBufferRef *hwframe_ctx, AVFrame *frame, int flags); + +/** + * Copy data to or from a hw surface. At least one of dst/src must have an + * AVHWFramesContext attached. + * + * If src has an AVHWFramesContext attached, then the format of dst (if set) + * must use one of the formats returned by av_hwframe_transfer_get_formats(src, + * AV_HWFRAME_TRANSFER_DIRECTION_FROM). 
+ * If dst has an AVHWFramesContext attached, then the format of src must use one + * of the formats returned by av_hwframe_transfer_get_formats(dst, + * AV_HWFRAME_TRANSFER_DIRECTION_TO) + * + * dst may be "clean" (i.e. with data/buf pointers unset), in which case the + * data buffers will be allocated by this function using av_frame_get_buffer(). + * If dst->format is set, then this format will be used, otherwise (when + * dst->format is AV_PIX_FMT_NONE) the first acceptable format will be chosen. + * + * @param dst the destination frame. dst is not touched on failure. + * @param src the source frame. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR error code on failure. + */ +int av_hwframe_transfer_data(AVFrame *dst, const AVFrame *src, int flags); + +enum AVHWFrameTransferDirection { + /** + * Transfer the data from the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_FROM, + + /** + * Transfer the data to the queried hw frame. + */ + AV_HWFRAME_TRANSFER_DIRECTION_TO, +}; + +/** + * Get a list of possible source or target formats usable in + * av_hwframe_transfer_data(). + * + * @param hwframe_ctx the frame context to obtain the information for + * @param dir the direction of the transfer + * @param formats the pointer to the output format list will be written here. + * The list is terminated with AV_PIX_FMT_NONE and must be freed + * by the caller when no longer needed using av_free(). + * If this function returns successfully, the format list will + * have at least one item (not counting the terminator). + * On failure, the contents of this pointer are unspecified. + * @param flags currently unused, should be set to zero + * @return 0 on success, a negative AVERROR code on failure. 
+ */ +int av_hwframe_transfer_get_formats(AVBufferRef *hwframe_ctx, + enum AVHWFrameTransferDirection dir, + enum AVPixelFormat **formats, int flags); + + +/** + * This struct describes the constraints on hardware frames attached to + * a given device with a hardware-specific configuration. This is returned + * by av_hwdevice_get_hwframe_constraints() and must be freed by + * av_hwframe_constraints_free() after use. + */ +typedef struct AVHWFramesConstraints { + /** + * A list of possible values for format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. This member will always be filled. + */ + enum AVPixelFormat *valid_hw_formats; + + /** + * A list of possible values for sw_format in the hw_frames_ctx, + * terminated by AV_PIX_FMT_NONE. Can be NULL if this information is + * not known. + */ + enum AVPixelFormat *valid_sw_formats; + + /** + * The minimum size of frames in this hw_frames_ctx. + * (Zero if not known.) + */ + int min_width; + int min_height; + + /** + * The maximum size of frames in this hw_frames_ctx. + * (INT_MAX if not known / no limit.) + */ + int max_width; + int max_height; +} AVHWFramesConstraints; + +/** + * Allocate a HW-specific configuration structure for a given HW device. + * After use, the user must free all members as required by the specific + * hardware structure being used, then free the structure itself with + * av_free(). + * + * @param device_ctx a reference to the associated AVHWDeviceContext. + * @return The newly created HW-specific configuration structure on + * success or NULL on failure. + */ +void *av_hwdevice_hwconfig_alloc(AVBufferRef *device_ctx); + +/** + * Get the constraints on HW frames given a device and the HW-specific + * configuration to be used with that device. If no HW-specific + * configuration is provided, returns the maximum possible capabilities + * of the device. + * + * @param device_ctx a reference to the associated AVHWDeviceContext. 
+ * @param hwconfig a filled HW-specific configuration structure, or NULL + * to return the maximum possible capabilities of the device. + * @return AVHWFramesConstraints structure describing the constraints + * on the device, or NULL if not available. + */ +AVHWFramesConstraints *av_hwdevice_get_hwframe_constraints(AVBufferRef *ref, + const void *hwconfig); + +/** + * Free an AVHWFrameConstraints structure. + * + * @param constraints The (filled or unfilled) AVHWFrameConstraints structure. + */ +void av_hwframe_constraints_free(AVHWFramesConstraints **constraints); + +#endif /* AVUTIL_HWCONTEXT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h new file mode 100644 index 0000000000..23a77cee73 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_cuda.h @@ -0,0 +1,46 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_CUDA_H +#define AVUTIL_HWCONTEXT_CUDA_H + +#include + +#include "pixfmt.h" + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_CUDA. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a CUdeviceptr. 
+ */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVCUDADeviceContext { + CUcontext cuda_ctx; +} AVCUDADeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_CUDA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h new file mode 100644 index 0000000000..6c36cb4b6b --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_dxva2.h @@ -0,0 +1,72 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + + +#ifndef AVUTIL_HWCONTEXT_DXVA2_H +#define AVUTIL_HWCONTEXT_DXVA2_H + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_DXVA2. + * + * Only fixed-size pools are supported. + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a pointer to IDirect3DSurface9. + */ + +#include +#include + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVDXVA2DeviceContext { + IDirect3DDeviceManager9 *devmgr; +} AVDXVA2DeviceContext; + +/** + * This struct is allocated as AVHWFramesContext.hwctx + */ +typedef struct AVDXVA2FramesContext { + /** + * The surface type (e.g. 
DXVA2_VideoProcessorRenderTarget or + * DXVA2_VideoDecoderRenderTarget). Must be set by the caller. + */ + DWORD surface_type; + + /** + * The surface pool. When an external pool is not provided by the caller, + * this will be managed (allocated and filled on init, freed on uninit) by + * libavutil. + */ + IDirect3DSurface9 **surfaces; + int nb_surfaces; + + /** + * Certain drivers require the decoder to be destroyed before the surfaces. + * To allow internally managed pools to work properly in such cases, this + * field is provided. + * + * If it is non-NULL, libavutil will call IDirectXVideoDecoder_Release() on + * it just before the internal surface pool is freed. + */ + IDirectXVideoDecoder *decoder_to_release; +} AVDXVA2FramesContext; + +#endif /* AVUTIL_HWCONTEXT_DXVA2_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h new file mode 100644 index 0000000000..7fd1a36e8f --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vaapi.h @@ -0,0 +1,82 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VAAPI_H +#define AVUTIL_HWCONTEXT_VAAPI_H + +#include + +/** + * @file + * API-specific header for AV_HWDEVICE_TYPE_VAAPI. 
+ * + * Dynamic frame pools are supported, but note that any pool used as a render + * target is required to be of fixed size in order to be be usable as an + * argument to vaCreateContext(). + * + * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs + * with the data pointer set to a VASurfaceID. + */ + +/** + * VAAPI connection details. + * + * Allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVAAPIDeviceContext { + /** + * The VADisplay handle, to be filled by the user. + */ + VADisplay display; +} AVVAAPIDeviceContext; + +/** + * VAAPI-specific data associated with a frame pool. + * + * Allocated as AVHWFramesContext.hwctx. + */ +typedef struct AVVAAPIFramesContext { + /** + * Set by the user to apply surface attributes to all surfaces in + * the frame pool. If null, default settings are used. + */ + VASurfaceAttrib *attributes; + int nb_attributes; + /** + * The surfaces IDs of all surfaces in the pool after creation. + * Only valid if AVHWFramesContext.initial_pool_size was positive. + * These are intended to be used as the render_targets arguments to + * vaCreateContext(). + */ + VASurfaceID *surface_ids; + int nb_surfaces; +} AVVAAPIFramesContext; + +/** + * VAAPI hardware pipeline configuration details. + * + * Allocated with av_hwdevice_hwconfig_alloc(). + */ +typedef struct AVVAAPIHWConfig { + /** + * ID of a VAAPI pipeline configuration. + */ + VAConfigID config_id; +} AVVAAPIHWConfig; + +#endif /* AVUTIL_HWCONTEXT_VAAPI_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vdpau.h b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vdpau.h new file mode 100644 index 0000000000..1b7ea1e443 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/hwcontext_vdpau.h @@ -0,0 +1,44 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_HWCONTEXT_VDPAU_H +#define AVUTIL_HWCONTEXT_VDPAU_H + +#include + +/** + * @file + * An API-specific header for AV_HWDEVICE_TYPE_VDPAU. + * + * This API supports dynamic frame pools. AVHWFramesContext.pool must return + * AVBufferRefs whose data pointer is a VdpVideoSurface. + */ + +/** + * This struct is allocated as AVHWDeviceContext.hwctx + */ +typedef struct AVVDPAUDeviceContext { + VdpDevice device; + VdpGetProcAddress *get_proc_address; +} AVVDPAUDeviceContext; + +/** + * AVHWFramesContext.hwctx is currently not used + */ + +#endif /* AVUTIL_HWCONTEXT_VDPAU_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/imgutils.h b/third-party/FFmpeg-iOS/include/libavutil/imgutils.h new file mode 100644 index 0000000000..23282a38fa --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/imgutils.h @@ -0,0 +1,213 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_IMGUTILS_H +#define AVUTIL_IMGUTILS_H + +/** + * @file + * misc image utilities + * + * @addtogroup lavu_picture + * @{ + */ + +#include "avutil.h" +#include "pixdesc.h" +#include "rational.h" + +/** + * Compute the max pixel step for each plane of an image with a + * format described by pixdesc. + * + * The pixel step is the distance in bytes between the first byte of + * the group of bytes which describe a pixel component and the first + * byte of the successive group in the same plane for the same + * component. + * + * @param max_pixsteps an array which is filled with the max pixel step + * for each plane. Since a plane may contain different pixel + * components, the computed max_pixsteps[plane] is relative to the + * component in the plane with the max pixel step. + * @param max_pixstep_comps an array which is filled with the component + * for each plane which has the max pixel step. May be NULL. + */ +void av_image_fill_max_pixsteps(int max_pixsteps[4], int max_pixstep_comps[4], + const AVPixFmtDescriptor *pixdesc); + +/** + * Compute the size of an image line with format pix_fmt and width + * width for the plane plane. + * + * @return the computed size in bytes + */ +int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane); + +/** + * Fill plane linesizes for an image with pixel format pix_fmt and + * width width. 
+ * + * @param linesizes array to be filled with the linesize for each plane + * @return >= 0 in case of success, a negative error code otherwise + */ +int av_image_fill_linesizes(int linesizes[4], enum AVPixelFormat pix_fmt, int width); + +/** + * Fill plane data pointers for an image with pixel format pix_fmt and + * height height. + * + * @param data pointers array to be filled with the pointer for each image plane + * @param ptr the pointer to a buffer which will contain the image + * @param linesizes the array containing the linesize for each + * plane, should be filled by av_image_fill_linesizes() + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_fill_pointers(uint8_t *data[4], enum AVPixelFormat pix_fmt, int height, + uint8_t *ptr, const int linesizes[4]); + +/** + * Allocate an image with size w and h and pixel format pix_fmt, and + * fill pointers and linesizes accordingly. + * The allocated image buffer has to be freed by using + * av_freep(&pointers[0]). + * + * @param align the value to use for buffer size alignment + * @return the size in bytes required for the image buffer, a negative + * error code in case of failure + */ +int av_image_alloc(uint8_t *pointers[4], int linesizes[4], + int w, int h, enum AVPixelFormat pix_fmt, int align); + +/** + * Copy image plane from src to dst. + * That is, copy "height" number of lines of "bytewidth" bytes each. + * The first byte of each successive line is separated by *_linesize + * bytes. + * + * bytewidth must be contained by both absolute values of dst_linesize + * and src_linesize, otherwise the function behavior is undefined. + * + * @param dst_linesize linesize for the image plane in dst + * @param src_linesize linesize for the image plane in src + */ +void av_image_copy_plane(uint8_t *dst, int dst_linesize, + const uint8_t *src, int src_linesize, + int bytewidth, int height); + +/** + * Copy image in src_data to dst_data. 
+ * + * @param dst_linesizes linesizes for the image in dst_data + * @param src_linesizes linesizes for the image in src_data + */ +void av_image_copy(uint8_t *dst_data[4], int dst_linesizes[4], + const uint8_t *src_data[4], const int src_linesizes[4], + enum AVPixelFormat pix_fmt, int width, int height); + +/** + * Setup the data pointers and linesizes based on the specified image + * parameters and the provided array. + * + * The fields of the given image are filled in by using the src + * address which points to the image data buffer. Depending on the + * specified pixel format, one or multiple image data pointers and + * line sizes will be set. If a planar format is specified, several + * pointers will be set pointing to the different picture planes and + * the line sizes of the different planes will be stored in the + * lines_sizes array. Call with src == NULL to get the required + * size for the src buffer. + * + * To allocate the buffer and fill in the dst_data and dst_linesize in + * one call, use av_image_alloc(). + * + * @param dst_data data pointers to be filled in + * @param dst_linesizes linesizes for the image in dst_data to be filled in + * @param src buffer which will contain or contains the actual image data, can be NULL + * @param pix_fmt the pixel format of the image + * @param width the width of the image in pixels + * @param height the height of the image in pixels + * @param align the value used in src for linesize alignment + * @return the size in bytes required for src, a negative error code + * in case of failure + */ +int av_image_fill_arrays(uint8_t *dst_data[4], int dst_linesize[4], + const uint8_t *src, + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Return the size in bytes of the amount of data required to store an + * image with the given parameters. 
+ * + * @param[in] align the assumed linesize alignment + */ +int av_image_get_buffer_size(enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Copy image data from an image into a buffer. + * + * av_image_get_buffer_size() can be used to compute the required size + * for the buffer to fill. + * + * @param dst a buffer into which picture data will be copied + * @param dst_size the size in bytes of dst + * @param src_data pointers containing the source image data + * @param src_linesizes linesizes for the image in src_data + * @param pix_fmt the pixel format of the source image + * @param width the width of the source image in pixels + * @param height the height of the source image in pixels + * @param align the assumed linesize alignment for dst + * @return the number of bytes written to dst, or a negative value + * (error code) on error + */ +int av_image_copy_to_buffer(uint8_t *dst, int dst_size, + const uint8_t * const src_data[4], const int src_linesize[4], + enum AVPixelFormat pix_fmt, int width, int height, int align); + +/** + * Check if the given dimension of an image is valid, meaning that all + * bytes of the image can be addressed with a signed int. + * + * @param w the width of the picture + * @param h the height of the picture + * @param log_offset the offset to sum to the log level for logging with log_ctx + * @param log_ctx the parent logging context, it may be NULL + * @return >= 0 if valid, a negative error code otherwise + */ +int av_image_check_size(unsigned int w, unsigned int h, int log_offset, void *log_ctx); + +/** + * Check if the given sample aspect ratio of an image is valid. + * + * It is considered invalid if the denominator is 0 or if applying the ratio + * to the image size would make the smaller dimension less than 1. If the + * sar numerator is 0, it is considered unknown and will return as valid. 
+ * + * @param w width of the image + * @param h height of the image + * @param sar sample aspect ratio of the image + * @return 0 if valid, a negative AVERROR code otherwise + */ +int av_image_check_sar(unsigned int w, unsigned int h, AVRational sar); + +/** + * @} + */ + + +#endif /* AVUTIL_IMGUTILS_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/intfloat.h b/third-party/FFmpeg-iOS/include/libavutil/intfloat.h new file mode 100644 index 0000000000..fe3d7ec4a5 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/intfloat.h @@ -0,0 +1,77 @@ +/* + * Copyright (c) 2011 Mans Rullgard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTFLOAT_H +#define AVUTIL_INTFLOAT_H + +#include +#include "attributes.h" + +union av_intfloat32 { + uint32_t i; + float f; +}; + +union av_intfloat64 { + uint64_t i; + double f; +}; + +/** + * Reinterpret a 32-bit integer as a float. + */ +static av_always_inline float av_int2float(uint32_t i) +{ + union av_intfloat32 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a float as a 32-bit integer. + */ +static av_always_inline uint32_t av_float2int(float f) +{ + union av_intfloat32 v; + v.f = f; + return v.i; +} + +/** + * Reinterpret a 64-bit integer as a double. 
+ */ +static av_always_inline double av_int2double(uint64_t i) +{ + union av_intfloat64 v; + v.i = i; + return v.f; +} + +/** + * Reinterpret a double as a 64-bit integer. + */ +static av_always_inline uint64_t av_double2int(double f) +{ + union av_intfloat64 v; + v.f = f; + return v.i; +} + +#endif /* AVUTIL_INTFLOAT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h b/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h new file mode 100644 index 0000000000..07f717692a --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/intreadwrite.h @@ -0,0 +1,629 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include +#include "../libavutilavconfig.h" +#include "attributes.h" +#include "bswap.h" + +typedef union { + uint64_t u64; + uint32_t u32[2]; + uint16_t u16[4]; + uint8_t u8 [8]; + double f64; + float f32[2]; +} av_alias av_alias64; + +typedef union { + uint32_t u32; + uint16_t u16[2]; + uint8_t u8 [4]; + float f32; +} av_alias av_alias32; + +typedef union { + uint16_t u16; + uint8_t u8 [2]; +} av_alias av_alias16; + +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|48|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. 
+ * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + * + * R/W means read/write, B/L/N means big/little/native endianness. + * The following macros require aligned access, compared to their + * unaligned variants: AV_(COPY|SWAP|ZERO)(64|128), AV_[RW]N[8-64]A. + * Incorrect usage may range from abysmal performance to crash + * depending on the platform. + * + * The unaligned variants are AV_[RW][BLN][8-64] and AV_COPY*U. + */ + +#ifdef HAVE_AV_CONFIG_H + +#include "config.h" + +#if ARCH_ARM +# include "arm/intreadwrite.h" +#elif ARCH_AVR32 +# include "avr32/intreadwrite.h" +#elif ARCH_MIPS +# include "mips/intreadwrite.h" +#elif ARCH_PPC +# include "ppc/intreadwrite.h" +#elif ARCH_TOMI +# include "tomi/intreadwrite.h" +#elif ARCH_X86 +# include "x86/intreadwrite.h" +#endif + +#endif /* HAVE_AV_CONFIG_H */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. + */ + +#if AV_HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# 
endif + +# if defined(AV_RN48) && !defined(AV_RB48) +# define AV_RB48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RB48) +# define AV_RN48(p) AV_RB48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WB48) +# define AV_WB48(p, v) AV_WN48(p, v) +# elif !defined(AV_WN48) && defined(AV_WB48) +# define AV_WN48(p, v) AV_WB48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) +# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* AV_HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN48) && !defined(AV_RL48) +# define AV_RL48(p) AV_RN48(p) +# elif !defined(AV_RN48) && defined(AV_RL48) +# define AV_RN48(p) AV_RL48(p) +# endif + +# if defined(AV_WN48) && !defined(AV_WL48) +# define AV_WL48(p, v) AV_WN48(p, v) 
+# elif !defined(AV_WN48) && defined(AV_WL48) +# define AV_WN48(p, v) AV_WL48(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !AV_HAVE_BIGENDIAN */ + +/* + * Define AV_[RW]N helper macros to simplify definitions not provided + * by per-arch headers. + */ + +#if defined(__GNUC__) && !defined(__TI_COMPILER_VERSION__) + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(__DECC) + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif AV_HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +# define AV_RB32(x) \ + (((uint32_t)((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const 
uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + (((uint32_t)((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, darg) do { \ + unsigned d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, darg) do { 
\ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if AV_HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, av_bswap##s(v)) +#else +# define AV_RB(s, p) av_bswap##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, av_bswap##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + 
+#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RB48 +# define AV_RB48(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 8) | \ + (uint64_t)((const uint8_t*)(x))[5]) +#endif +#ifndef AV_WB48 +# define AV_WB48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[5] = (d); \ + ((uint8_t*)(p))[4] = (d)>>8; \ + ((uint8_t*)(p))[3] = (d)>>16; \ + ((uint8_t*)(p))[2] = (d)>>24; \ + ((uint8_t*)(p))[1] = (d)>>32; \ + ((uint8_t*)(p))[0] = (d)>>40; \ + } while(0) +#endif + +#ifndef AV_RL48 +# define AV_RL48(x) \ + (((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL48 +# define AV_WL48(p, darg) do { \ + uint64_t d = (darg); \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + 
((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. + */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* + * The AV_COPYxxU macros are suitable for copying data to/from unaligned + * memory locations. + */ + +#define AV_COPYU(n, d, s) AV_WN##n(d, AV_RN##n(s)); + +#ifndef AV_COPY16U +# define AV_COPY16U(d, s) AV_COPYU(16, d, s) +#endif + +#ifndef AV_COPY32U +# define AV_COPY32U(d, s) AV_COPYU(32, d, s) +#endif + +#ifndef AV_COPY64U +# define AV_COPY64U(d, s) AV_COPYU(64, d, s) +#endif + +#ifndef AV_COPY128U +# define AV_COPY128U(d, s) \ + do { \ + AV_COPY64U(d, s); \ + AV_COPY64U((char *)(d) + 8, (const char *)(s) + 8); \ + } while(0) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. 
+ */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/lfg.h b/third-party/FFmpeg-iOS/include/libavutil/lfg.h new file mode 100644 index 0000000000..ec90562cf2 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/lfg.h @@ -0,0 +1,62 @@ +/* + * Lagged Fibonacci PRNG + * Copyright (c) 2008 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LFG_H +#define AVUTIL_LFG_H + +typedef struct AVLFG { + unsigned int state[64]; + int index; +} AVLFG; + +void av_lfg_init(AVLFG *c, unsigned int seed); + +/** + * Get the next random unsigned 32-bit number using an ALFG. + * + * Please also consider a simple LCG like state= state*1664525+1013904223, + * it may be good enough and faster for your specific use case. + */ +static inline unsigned int av_lfg_get(AVLFG *c){ + c->state[c->index & 63] = c->state[(c->index-24) & 63] + c->state[(c->index-55) & 63]; + return c->state[c->index++ & 63]; +} + +/** + * Get the next random unsigned 32-bit number using a MLFG. + * + * Please also consider av_lfg_get() above, it is faster. + */ +static inline unsigned int av_mlfg_get(AVLFG *c){ + unsigned int a= c->state[(c->index-55) & 63]; + unsigned int b= c->state[(c->index-24) & 63]; + return c->state[c->index++ & 63] = 2*a*b+a+b; +} + +/** + * Get the next two numbers generated by a Box-Muller Gaussian + * generator using the random numbers issued by lfg. + * + * @param out array where the two generated numbers are placed + */ +void av_bmg_get(AVLFG *lfg, double out[2]); + +#endif /* AVUTIL_LFG_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/log.h b/third-party/FFmpeg-iOS/include/libavutil/log.h new file mode 100644 index 0000000000..0acc1b9214 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/log.h @@ -0,0 +1,376 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_LOG_H +#define AVUTIL_LOG_H + +#include +#include "avutil.h" +#include "attributes.h" +#include "version.h" + +typedef enum { + AV_CLASS_CATEGORY_NA = 0, + AV_CLASS_CATEGORY_INPUT, + AV_CLASS_CATEGORY_OUTPUT, + AV_CLASS_CATEGORY_MUXER, + AV_CLASS_CATEGORY_DEMUXER, + AV_CLASS_CATEGORY_ENCODER, + AV_CLASS_CATEGORY_DECODER, + AV_CLASS_CATEGORY_FILTER, + AV_CLASS_CATEGORY_BITSTREAM_FILTER, + AV_CLASS_CATEGORY_SWSCALER, + AV_CLASS_CATEGORY_SWRESAMPLER, + AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT = 40, + AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT, + AV_CLASS_CATEGORY_DEVICE_OUTPUT, + AV_CLASS_CATEGORY_DEVICE_INPUT, + AV_CLASS_CATEGORY_NB, ///< not part of ABI/API +}AVClassCategory; + +#define AV_IS_INPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_INPUT)) + +#define AV_IS_OUTPUT_DEVICE(category) \ + (((category) == AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT) || \ + ((category) == AV_CLASS_CATEGORY_DEVICE_OUTPUT)) + +struct AVOptionRanges; + +/** + * Describe the class of an AVClass context structure. That is an + * arbitrary struct of which the first field is a pointer to an + * AVClass struct (e.g. AVCodecContext, AVFormatContext etc.). 
+ */ +typedef struct AVClass { + /** + * The name of the class; usually it is the same name as the + * context structure type to which the AVClass is associated. + */ + const char* class_name; + + /** + * A pointer to a function which returns the name of a context + * instance ctx associated with the class. + */ + const char* (*item_name)(void* ctx); + + /** + * a pointer to the first option specified in the class if any or NULL + * + * @see av_set_default_options() + */ + const struct AVOption *option; + + /** + * LIBAVUTIL_VERSION with which this structure was created. + * This is used to allow fields to be added without requiring major + * version bumps everywhere. + */ + + int version; + + /** + * Offset in the structure where log_level_offset is stored. + * 0 means there is no such variable + */ + int log_level_offset_offset; + + /** + * Offset in the structure where a pointer to the parent context for + * logging is stored. For example a decoder could pass its AVCodecContext + * to eval as such a parent context, which an av_log() implementation + * could then leverage to display the parent context. + * The offset can be NULL. + */ + int parent_log_context_offset; + + /** + * Return next AVOptions-enabled child or NULL + */ + void* (*child_next)(void *obj, void *prev); + + /** + * Return an AVClass corresponding to the next potential + * AVOptions-enabled child. + * + * The difference between child_next and this is that + * child_next iterates over _already existing_ objects, while + * child_class_next iterates over _all possible_ children. + */ + const struct AVClass* (*child_class_next)(const struct AVClass *prev); + + /** + * Category used for visualization (like color) + * This is only set if the category is equal for all objects using this class. + * available since version (51 << 16 | 56 << 8 | 100) + */ + AVClassCategory category; + + /** + * Callback to return the category. 
+ * available since version (51 << 16 | 59 << 8 | 100) + */ + AVClassCategory (*get_category)(void* ctx); + + /** + * Callback to return the supported/allowed ranges. + * available since version (52.12) + */ + int (*query_ranges)(struct AVOptionRanges **, void *obj, const char *key, int flags); +} AVClass; + +/** + * @addtogroup lavu_log + * + * @{ + * + * @defgroup lavu_log_constants Logging Constants + * + * @{ + */ + +/** + * Print no output. + */ +#define AV_LOG_QUIET -8 + +/** + * Something went really wrong and we will crash now. + */ +#define AV_LOG_PANIC 0 + +/** + * Something went wrong and recovery is not possible. + * For example, no header was found for a format which depends + * on headers or an illegal combination of parameters is used. + */ +#define AV_LOG_FATAL 8 + +/** + * Something went wrong and cannot losslessly be recovered. + * However, not all future data is affected. + */ +#define AV_LOG_ERROR 16 + +/** + * Something somehow does not look correct. This may or may not + * lead to problems. An example would be the use of '-vstrict -2'. + */ +#define AV_LOG_WARNING 24 + +/** + * Standard information. + */ +#define AV_LOG_INFO 32 + +/** + * Detailed information. + */ +#define AV_LOG_VERBOSE 40 + +/** + * Stuff which is only useful for libav* developers. + */ +#define AV_LOG_DEBUG 48 + +/** + * Extremely verbose debugging, useful for libav* development. + */ +#define AV_LOG_TRACE 56 + +#define AV_LOG_MAX_OFFSET (AV_LOG_TRACE - AV_LOG_QUIET) + +/** + * @} + */ + +/** + * Sets additional colors for extended debugging sessions. + * @code + av_log(ctx, AV_LOG_DEBUG|AV_LOG_C(134), "Message in purple\n"); + @endcode + * Requires 256color terminal support. Uses outside debugging is not + * recommended. + */ +#define AV_LOG_C(x) ((x) << 8) + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. 
This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct or NULL if general log. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + */ +void av_log(void *avcl, int level, const char *fmt, ...) av_printf_format(3, 4); + + +/** + * Send the specified message to the log if the level is less than or equal + * to the current av_log_level. By default, all logging messages are sent to + * stderr. This behavior can be altered by setting a different logging callback + * function. + * @see av_log_set_callback + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_vlog(void *avcl, int level, const char *fmt, va_list vl); + +/** + * Get the current log level + * + * @see lavu_log_constants + * + * @return Current log level + */ +int av_log_get_level(void); + +/** + * Set the log level + * + * @see lavu_log_constants + * + * @param level Logging level + */ +void av_log_set_level(int level); + +/** + * Set the logging callback + * + * @note The callback must be thread safe, even if the application does not use + * threads itself as some codecs are multithreaded. + * + * @see av_log_default_callback + * + * @param callback A logging function with a compatible signature. 
+ */ +void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)); + +/** + * Default logging callback + * + * It prints the message to stderr, optionally colorizing it. + * + * @param avcl A pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct. + * @param level The importance level of the message expressed using a @ref + * lavu_log_constants "Logging Constant". + * @param fmt The format string (printf-compatible) that specifies how + * subsequent arguments are converted to output. + * @param vl The arguments referenced by the format string. + */ +void av_log_default_callback(void *avcl, int level, const char *fmt, + va_list vl); + +/** + * Return the context name + * + * @param ctx The AVClass context + * + * @return The AVClass class_name + */ +const char* av_default_item_name(void* ctx); +AVClassCategory av_default_get_category(void *ptr); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formatted line + * @param line_size size of the buffer + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + */ +void av_log_format_line(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +/** + * Format a line of log the same way as the default callback. + * @param line buffer to receive the formatted line; + * may be NULL if line_size is 0 + * @param line_size size of the buffer; at most line_size-1 characters will + * be written to the buffer, plus one null terminator + * @param print_prefix used to store whether the prefix must be printed; + * must point to a persistent integer initially set to 1 + * @return Returns a negative value if an error occurred, otherwise returns + * the number of characters that would have been written for a + * sufficiently large buffer, not including the terminating null + * character. 
If the return value is not less than line_size, it means + * that the log message was truncated to fit the buffer. + */ +int av_log_format_line2(void *ptr, int level, const char *fmt, va_list vl, + char *line, int line_size, int *print_prefix); + +#if FF_API_DLOG +/** + * av_dlog macros + * @deprecated unused + * Useful to print debug messages that shouldn't get compiled in normally. + */ + +#ifdef DEBUG +# define av_dlog(pctx, ...) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__) +#else +# define av_dlog(pctx, ...) do { if (0) av_log(pctx, AV_LOG_DEBUG, __VA_ARGS__); } while (0) +#endif +#endif /* FF_API_DLOG */ + +/** + * Skip repeated messages, this requires the user app to use av_log() instead of + * (f)printf as the 2 would otherwise interfere and lead to + * "Last message repeated x times" messages below (f)printf messages with some + * bad luck. + * Also to receive the last, "last repeated" line if any, the user app must + * call av_log(NULL, AV_LOG_QUIET, "%s", ""); at the end + */ +#define AV_LOG_SKIP_REPEATED 1 + +/** + * Include the log severity in messages originating from codecs. + * + * Results in messages such as: + * [rawvideo @ 0xDEADBEEF] [error] encode did not produce valid pts + */ +#define AV_LOG_PRINT_LEVEL 2 + +void av_log_set_flags(int arg); +int av_log_get_flags(void); + +/** + * @} + */ + +#endif /* AVUTIL_LOG_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/macros.h b/third-party/FFmpeg-iOS/include/libavutil/macros.h new file mode 100644 index 0000000000..2007ee5619 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/macros.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * @ingroup lavu + * Utility Preprocessor macros + */ + +#ifndef AVUTIL_MACROS_H +#define AVUTIL_MACROS_H + +/** + * @addtogroup preproc_misc Preprocessor String Macros + * + * String manipulation macros + * + * @{ + */ + +#define AV_STRINGIFY(s) AV_TOSTRING(s) +#define AV_TOSTRING(s) #s + +#define AV_GLUE(a, b) a ## b +#define AV_JOIN(a, b) AV_GLUE(a, b) + +/** + * @} + */ + +#define AV_PRAGMA(s) _Pragma(#s) + +#define FFALIGN(x, a) (((x)+(a)-1)&~((a)-1)) + +#endif /* AVUTIL_MACROS_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h b/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h new file mode 100644 index 0000000000..936533fec4 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/mastering_display_metadata.h @@ -0,0 +1,89 @@ +/** + * Copyright (c) 2016 Neil Birkbeck + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MASTERING_DISPLAY_METADATA_H +#define AVUTIL_MASTERING_DISPLAY_METADATA_H + +#include "frame.h" +#include "rational.h" + + +/** + * Mastering display metadata capable of representing the color volume of + * the display used to master the content (SMPTE 2086:2014). + * + * To be used as payload of a AVFrameSideData or AVPacketSideData with the + * appropriate type. + * + * @note The struct should be allocated with av_mastering_display_metadata_alloc() + * and its size is not a part of the public ABI. + */ +typedef struct AVMasteringDisplayMetadata { + /** + * CIE 1931 xy chromaticity coords of color primaries (r, g, b order). + */ + AVRational display_primaries[3][2]; + + /** + * CIE 1931 xy chromaticity coords of white point. + */ + AVRational white_point[2]; + + /** + * Min luminance of mastering display (cd/m^2). + */ + AVRational min_luminance; + + /** + * Max luminance of mastering display (cd/m^2). + */ + AVRational max_luminance; + + /** + * Flag indicating whether the display primaries (and white point) are set. + */ + int has_primaries; + + /** + * Flag indicating whether the luminance (min_ and max_) have been set. + */ + int has_luminance; + +} AVMasteringDisplayMetadata; + +/** + * Allocate an AVMasteringDisplayMetadata structure and set its fields to + * default values. The resulting struct can be freed using av_freep(). + * + * @return An AVMasteringDisplayMetadata filled with default values or NULL + * on failure. + */ +AVMasteringDisplayMetadata *av_mastering_display_metadata_alloc(void); + +/** + * Allocate a complete AVMasteringDisplayMetadata and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVMasteringDisplayMetadata structure to be filled by caller. 
+ */ +AVMasteringDisplayMetadata *av_mastering_display_metadata_create_side_data(AVFrame *frame); + +#endif /* AVUTIL_MASTERING_DISPLAY_METADATA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/mathematics.h b/third-party/FFmpeg-iOS/include/libavutil/mathematics.h new file mode 100644 index 0000000000..57c44f845d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/mathematics.h @@ -0,0 +1,165 @@ +/* + * copyright (c) 2005-2012 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MATHEMATICS_H +#define AVUTIL_MATHEMATICS_H + +#include +#include +#include "attributes.h" +#include "rational.h" +#include "intfloat.h" + +#ifndef M_E +#define M_E 2.7182818284590452354 /* e */ +#endif +#ifndef M_LN2 +#define M_LN2 0.69314718055994530942 /* log_e 2 */ +#endif +#ifndef M_LN10 +#define M_LN10 2.30258509299404568402 /* log_e 10 */ +#endif +#ifndef M_LOG2_10 +#define M_LOG2_10 3.32192809488736234787 /* log_2 10 */ +#endif +#ifndef M_PHI +#define M_PHI 1.61803398874989484820 /* phi / golden ratio */ +#endif +#ifndef M_PI +#define M_PI 3.14159265358979323846 /* pi */ +#endif +#ifndef M_PI_2 +#define M_PI_2 1.57079632679489661923 /* pi/2 */ +#endif +#ifndef M_SQRT1_2 +#define M_SQRT1_2 0.70710678118654752440 /* 1/sqrt(2) */ +#endif +#ifndef M_SQRT2 +#define M_SQRT2 1.41421356237309504880 /* sqrt(2) */ +#endif +#ifndef NAN +#define NAN av_int2float(0x7fc00000) +#endif +#ifndef INFINITY +#define INFINITY av_int2float(0x7f800000) +#endif + +/** + * @addtogroup lavu_math + * @{ + */ + + +enum AVRounding { + AV_ROUND_ZERO = 0, ///< Round toward zero. + AV_ROUND_INF = 1, ///< Round away from zero. + AV_ROUND_DOWN = 2, ///< Round toward -infinity. + AV_ROUND_UP = 3, ///< Round toward +infinity. + AV_ROUND_NEAR_INF = 5, ///< Round to nearest and halfway cases away from zero. + AV_ROUND_PASS_MINMAX = 8192, ///< Flag to pass INT64_MIN/MAX through instead of rescaling, this avoids special cases for AV_NOPTS_VALUE +}; + +/** + * Compute the greatest common divisor of a and b. + * + * @return gcd of a and b up to sign; if a >= 0 and b >= 0, return value is >= 0; + * if a == 0 and b == 0, returns 0. + */ +int64_t av_const av_gcd(int64_t a, int64_t b); + +/** + * Rescale a 64-bit integer with rounding to nearest. 
+ * A simple a*b/c isn't possible as it can overflow. + */ +int64_t av_rescale(int64_t a, int64_t b, int64_t c) av_const; + +/** + * Rescale a 64-bit integer with specified rounding. + * A simple a*b/c isn't possible as it can overflow. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers. + */ +int64_t av_rescale_q(int64_t a, AVRational bq, AVRational cq) av_const; + +/** + * Rescale a 64-bit integer by 2 rational numbers with specified rounding. + * + * @return rescaled value a, or if AV_ROUND_PASS_MINMAX is set and a is + * INT64_MIN or INT64_MAX then a is passed through unchanged. + */ +int64_t av_rescale_q_rnd(int64_t a, AVRational bq, AVRational cq, + enum AVRounding) av_const; + +/** + * Compare 2 timestamps each in its own timebases. + * The result of the function is undefined if one of the timestamps + * is outside the int64_t range when represented in the others timebase. + * @return -1 if ts_a is before ts_b, 1 if ts_a is after ts_b or 0 if they represent the same position + */ +int av_compare_ts(int64_t ts_a, AVRational tb_a, int64_t ts_b, AVRational tb_b); + +/** + * Compare 2 integers modulo mod. + * That is we compare integers a and b for which only the least + * significant log2(mod) bits are known. + * + * @param mod must be a power of 2 + * @return a negative value if a is smaller than b + * a positive value if a is greater than b + * 0 if a equals b + */ +int64_t av_compare_mod(uint64_t a, uint64_t b, uint64_t mod); + +/** + * Rescale a timestamp while preserving known durations. 
+ * + * @param in_ts Input timestamp + * @param in_tb Input timebase + * @param fs_tb Duration and *last timebase + * @param duration duration till the next call + * @param out_tb Output timebase + */ +int64_t av_rescale_delta(AVRational in_tb, int64_t in_ts, AVRational fs_tb, int duration, int64_t *last, AVRational out_tb); + +/** + * Add a value to a timestamp. + * + * This function guarantees that when the same value is repeatly added that + * no accumulation of rounding errors occurs. + * + * @param ts Input timestamp + * @param ts_tb Input timestamp timebase + * @param inc value to add to ts + * @param inc_tb inc timebase + */ +int64_t av_add_stable(AVRational ts_tb, int64_t ts, AVRational inc_tb, int64_t inc); + + + /** + * @} + */ + +#endif /* AVUTIL_MATHEMATICS_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/md5.h b/third-party/FFmpeg-iOS/include/libavutil/md5.h new file mode 100644 index 0000000000..79702c88c2 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/md5.h @@ -0,0 +1,81 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MD5_H +#define AVUTIL_MD5_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_md5 MD5 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_md5_size; + +struct AVMD5; + +/** + * Allocate an AVMD5 context. + */ +struct AVMD5 *av_md5_alloc(void); + +/** + * Initialize MD5 hashing. + * + * @param ctx pointer to the function context (of size av_md5_size) + */ +void av_md5_init(struct AVMD5 *ctx); + +/** + * Update hash value. + * + * @param ctx hash function context + * @param src input data to update hash with + * @param len input data length + */ +void av_md5_update(struct AVMD5 *ctx, const uint8_t *src, int len); + +/** + * Finish hashing and output digest value. + * + * @param ctx hash function context + * @param dst buffer where output digest value is stored + */ +void av_md5_final(struct AVMD5 *ctx, uint8_t *dst); + +/** + * Hash an array of data. + * + * @param dst The output buffer to write the digest into + * @param src The data to hash + * @param len The length of the data, in bytes + */ +void av_md5_sum(uint8_t *dst, const uint8_t *src, const int len); + +/** + * @} + */ + +#endif /* AVUTIL_MD5_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/mem.h b/third-party/FFmpeg-iOS/include/libavutil/mem.h new file mode 100644 index 0000000000..d25b3229b7 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/mem.h @@ -0,0 +1,406 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * memory handling functions + */ + +#ifndef AVUTIL_MEM_H +#define AVUTIL_MEM_H + +#include +#include + +#include "attributes.h" +#include "error.h" +#include "avutil.h" + +/** + * @addtogroup lavu_mem + * @{ + */ + + +#if defined(__INTEL_COMPILER) && __INTEL_COMPILER < 1110 || defined(__SUNPRO_C) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) const t __attribute__ ((aligned (n))) v +#elif defined(__TI_COMPILER_VERSION__) + #define DECLARE_ALIGNED(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + t __attribute__((aligned(n))) v + #define DECLARE_ASM_CONST(n,t,v) \ + AV_PRAGMA(DATA_ALIGN(v,n)) \ + static const t __attribute__((aligned(n))) v +#elif defined(__GNUC__) + #define DECLARE_ALIGNED(n,t,v) t __attribute__ ((aligned (n))) v + #define DECLARE_ASM_CONST(n,t,v) static const t av_used __attribute__ ((aligned (n))) v +#elif defined(_MSC_VER) + #define DECLARE_ALIGNED(n,t,v) __declspec(align(n)) t v + #define DECLARE_ASM_CONST(n,t,v) __declspec(align(n)) static const t v +#else + #define DECLARE_ALIGNED(n,t,v) t v + #define DECLARE_ASM_CONST(n,t,v) static const t v +#endif + +#if AV_GCC_VERSION_AT_LEAST(3,1) + #define av_malloc_attrib __attribute__((__malloc__)) +#else + 
#define av_malloc_attrib +#endif + +#if AV_GCC_VERSION_AT_LEAST(4,3) + #define av_alloc_size(...) __attribute__((alloc_size(__VA_ARGS__))) +#else + #define av_alloc_size(...) +#endif + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU). + * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_mallocz() + */ +void *av_malloc(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of size * nmemb bytes with av_malloc(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_malloc() + */ +av_alloc_size(1, 2) static inline void *av_malloc_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_malloc(nmemb * size); +} + +/** + * Allocate or reallocate a block of memory. + * If ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param size Size in bytes of the memory block to be allocated or + * reallocated. + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. 
+ * @see av_fast_realloc() + */ +void *av_realloc(void *ptr, size_t size) av_alloc_size(2); + +/** + * Allocate or reallocate a block of memory. + * This function does the same thing as av_realloc, except: + * - It takes two arguments and checks the result of the multiplication for + * integer overflow. + * - It frees the input block in case of failure, thus avoiding the memory + * leak with the classic "buf = realloc(buf); if (!buf) return -1;". + */ +void *av_realloc_f(void *ptr, size_t nelem, size_t elsize); + +/** + * Allocate or reallocate a block of memory. + * If *ptr is NULL and size > 0, allocate a new block. If + * size is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param size Size in bytes for the memory block to be allocated or + * reallocated + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_reallocp(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_warn_unused_result +int av_reallocp(void *ptr, size_t size); + +/** + * Allocate or reallocate an array. + * If ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a memory block already allocated with + * av_realloc() or NULL. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to a newly-reallocated block or NULL if the block + * cannot be reallocated or the function is used to free the memory block. 
+ * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) void *av_realloc_array(void *ptr, size_t nmemb, size_t size); + +/** + * Allocate or reallocate an array through a pointer to a pointer. + * If *ptr is NULL and nmemb > 0, allocate a new block. If + * nmemb is zero, free the memory block pointed to by ptr. + * @param ptr Pointer to a pointer to a memory block already allocated + * with av_realloc(), or pointer to a pointer to NULL. + * The pointer is updated on success, or freed on failure. + * @param nmemb Number of elements + * @param size Size of the single element + * @return Zero on success, an AVERROR error code on failure. + * @warning Pointers originating from the av_malloc() family of functions must + * not be passed to av_realloc(). The former can be implemented using + * memalign() (or other functions), and there is no guarantee that + * pointers from such functions can be passed to realloc() at all. + * The situation is undefined according to POSIX and may crash with + * some libc implementations. + */ +av_alloc_size(2, 3) int av_reallocp_array(void *ptr, size_t nmemb, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc(). + * @param ptr Pointer to the memory block which should be freed. + * @note ptr = NULL is explicitly allowed. + * @note It is recommended that you use av_freep() instead. + * @see av_freep() + */ +void av_free(void *ptr); + +/** + * Allocate a block of size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. 
+ * @param size Size in bytes for the memory block to be allocated. + * @return Pointer to the allocated block, NULL if it cannot be allocated. + * @see av_malloc() + */ +void *av_mallocz(size_t size) av_malloc_attrib av_alloc_size(1); + +/** + * Allocate a block of nmemb * size bytes with alignment suitable for all + * memory accesses (including vectors if available on the CPU) and + * zero all the bytes of the block. + * The allocation will fail if nmemb * size is greater than or equal + * to INT_MAX. + * @param nmemb + * @param size + * @return Pointer to the allocated block, NULL if it cannot be allocated. + */ +void *av_calloc(size_t nmemb, size_t size) av_malloc_attrib; + +/** + * Allocate a block of size * nmemb bytes with av_mallocz(). + * @param nmemb Number of elements + * @param size Size of the single element + * @return Pointer to the allocated block, NULL if the block cannot + * be allocated. + * @see av_mallocz() + * @see av_malloc_array() + */ +av_alloc_size(1, 2) static inline void *av_mallocz_array(size_t nmemb, size_t size) +{ + if (!size || nmemb >= INT_MAX / size) + return NULL; + return av_mallocz(nmemb * size); +} + +/** + * Duplicate the string s. + * @param s string to be duplicated + * @return Pointer to a newly-allocated string containing a + * copy of s or NULL if the string cannot be allocated. + */ +char *av_strdup(const char *s) av_malloc_attrib; + +/** + * Duplicate a substring of the string s. + * @param s string to be duplicated + * @param len the maximum length of the resulting string (not counting the + * terminating byte). + * @return Pointer to a newly-allocated string containing a + * copy of s or NULL if the string cannot be allocated. + */ +char *av_strndup(const char *s, size_t len) av_malloc_attrib; + +/** + * Duplicate the buffer p. + * @param p buffer to be duplicated + * @return Pointer to a newly allocated buffer containing a + * copy of p or NULL if the buffer cannot be allocated. 
+ */ +void *av_memdup(const void *p, size_t size); + +/** + * Free a memory block which has been allocated with av_malloc(z)() or + * av_realloc() and set the pointer pointing to it to NULL. + * @param ptr Pointer to the pointer to the memory block which should + * be freed. + * @note passing a pointer to a NULL pointer is safe and leads to no action. + * @see av_free() + */ +void av_freep(void *ptr); + +/** + * Add an element to a dynamic array. + * + * The array to grow is supposed to be an array of pointers to + * structures, and the element to add must be a pointer to an already + * allocated structure. + * + * The array is reallocated when its size reaches powers of 2. + * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem element to add + * @see av_dynarray_add_nofree(), av_dynarray2_add() + */ +void av_dynarray_add(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element to a dynamic array. + * + * Function has the same functionality as av_dynarray_add(), + * but it doesn't free memory on fails. It returns error code + * instead and leave current buffer untouched. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem element to add + * @return >=0 on success, negative otherwise. + * @see av_dynarray_add(), av_dynarray2_add() + */ +av_warn_unused_result +int av_dynarray_add_nofree(void *tab_ptr, int *nb_ptr, void *elem); + +/** + * Add an element of size elem_size to a dynamic array. + * + * The array is reallocated when its number of elements reaches powers of 2. 
+ * Therefore, the amortized cost of adding an element is constant. + * + * In case of success, the pointer to the array is updated in order to + * point to the new grown array, and the number pointed to by nb_ptr + * is incremented. + * In case of failure, the array is freed, *tab_ptr is set to NULL and + * *nb_ptr is set to 0. + * + * @param tab_ptr pointer to the array to grow + * @param nb_ptr pointer to the number of elements in the array + * @param elem_size size in bytes of the elements in the array + * @param elem_data pointer to the data of the element to add. If NULL, the space of + * the new added element is not filled. + * @return pointer to the data of the element to copy in the new allocated space. + * If NULL, the new allocated space is left uninitialized." + * @see av_dynarray_add(), av_dynarray_add_nofree() + */ +void *av_dynarray2_add(void **tab_ptr, int *nb_ptr, size_t elem_size, + const uint8_t *elem_data); + +/** + * Multiply two size_t values checking for overflow. + * @return 0 if success, AVERROR(EINVAL) if overflow. + */ +static inline int av_size_mult(size_t a, size_t b, size_t *r) +{ + size_t t = a * b; + /* Hack inspired from glibc: only try the division if nelem and elsize + * are both greater than sqrt(SIZE_MAX). */ + if ((a | b) >= ((size_t)1 << (sizeof(size_t) * 4)) && a && t / a != b) + return AVERROR(EINVAL); + *r = t; + return 0; +} + +/** + * Set the maximum size that may me allocated in one block. + */ +void av_max_alloc(size_t max); + +/** + * deliberately overlapping memcpy implementation + * @param dst destination buffer + * @param back how many bytes back we start (the initial size of the overlapping window), must be > 0 + * @param cnt number of bytes to copy, must be >= 0 + * + * cnt > back is valid, this will copy the bytes we just copied, + * thus creating a repeating pattern with a period length of back. 
+ */ +void av_memcpy_backptr(uint8_t *dst, int back, int cnt); + +/** + * Reallocate the given block if it is not large enough, otherwise do nothing. + * + * @see av_realloc + */ +void *av_fast_realloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * Contrary to av_fast_realloc the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special + * handling to avoid memleaks is necessary. + * + * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer + * @param size size of the buffer *ptr points to + * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and + * *size 0 if an error occurred. + */ +void av_fast_malloc(void *ptr, unsigned int *size, size_t min_size); + +/** + * Allocate a buffer, reusing the given one if large enough. + * + * All newly allocated space is initially cleared + * Contrary to av_fast_realloc the current buffer contents might not be + * preserved and on error the old buffer is freed, thus no special + * handling to avoid memleaks is necessary. + * + * @param ptr pointer to pointer to already allocated buffer, overwritten with pointer to new buffer + * @param size size of the buffer *ptr points to + * @param min_size minimum size of *ptr buffer after returning, *ptr will be NULL and + * *size 0 if an error occurred. + */ +void av_fast_mallocz(void *ptr, unsigned int *size, size_t min_size); + +/** + * @} + */ + +#endif /* AVUTIL_MEM_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/motion_vector.h b/third-party/FFmpeg-iOS/include/libavutil/motion_vector.h new file mode 100644 index 0000000000..ec29556388 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/motion_vector.h @@ -0,0 +1,57 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MOTION_VECTOR_H +#define AVUTIL_MOTION_VECTOR_H + +#include + +typedef struct AVMotionVector { + /** + * Where the current macroblock comes from; negative value when it comes + * from the past, positive value when it comes from the future. + * XXX: set exact relative ref frame reference instead of a +/- 1 "direction". + */ + int32_t source; + /** + * Width and height of the block. + */ + uint8_t w, h; + /** + * Absolute source position. Can be outside the frame area. + */ + int16_t src_x, src_y; + /** + * Absolute destination position. Can be outside the frame area. + */ + int16_t dst_x, dst_y; + /** + * Extra flag information. + * Currently unused. 
+ */ + uint64_t flags; + /** + * Motion vector + * src_x = dst_x + motion_x / motion_scale + * src_y = dst_y + motion_y / motion_scale + */ + int32_t motion_x, motion_y; + uint16_t motion_scale; +} AVMotionVector; + +#endif /* AVUTIL_MOTION_VECTOR_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/murmur3.h b/third-party/FFmpeg-iOS/include/libavutil/murmur3.h new file mode 100644 index 0000000000..f29ed973e9 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/murmur3.h @@ -0,0 +1,32 @@ +/* + * Copyright (C) 2013 Reimar Döffinger + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_MURMUR3_H +#define AVUTIL_MURMUR3_H + +#include + +struct AVMurMur3 *av_murmur3_alloc(void); +void av_murmur3_init_seeded(struct AVMurMur3 *c, uint64_t seed); +void av_murmur3_init(struct AVMurMur3 *c); +void av_murmur3_update(struct AVMurMur3 *c, const uint8_t *src, int len); +void av_murmur3_final(struct AVMurMur3 *c, uint8_t dst[16]); + +#endif /* AVUTIL_MURMUR3_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/opt.h b/third-party/FFmpeg-iOS/include/libavutil/opt.h new file mode 100644 index 0000000000..9a76a47f75 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/opt.h @@ -0,0 +1,865 @@ +/* + * AVOptions + * copyright (c) 2005 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_OPT_H +#define AVUTIL_OPT_H + +/** + * @file + * AVOptions + */ + +#include "rational.h" +#include "avutil.h" +#include "dict.h" +#include "log.h" +#include "pixfmt.h" +#include "samplefmt.h" +#include "version.h" + +/** + * @defgroup avoptions AVOptions + * @ingroup lavu_data + * @{ + * AVOptions provide a generic system to declare options on arbitrary structs + * ("objects"). An option can have a help text, a type and a range of possible + * values. Options may then be enumerated, read and written to. + * + * @section avoptions_implement Implementing AVOptions + * This section describes how to add AVOptions capabilities to a struct. + * + * All AVOptions-related information is stored in an AVClass. Therefore + * the first member of the struct should be a pointer to an AVClass describing it. + * The option field of the AVClass must be set to a NULL-terminated static array + * of AVOptions. Each AVOption must have a non-empty name, a type, a default + * value and for number-type AVOptions also a range of allowed values. It must + * also declare an offset in bytes from the start of the struct, where the field + * associated with this AVOption is located. Other fields in the AVOption struct + * should also be set when applicable, but are not required. 
+ * + * The following example illustrates an AVOptions-enabled struct: + * @code + * typedef struct test_struct { + * AVClass *class; + * int int_opt; + * char *str_opt; + * uint8_t *bin_opt; + * int bin_len; + * } test_struct; + * + * static const AVOption test_options[] = { + * { "test_int", "This is a test option of int type.", offsetof(test_struct, int_opt), + * AV_OPT_TYPE_INT, { .i64 = -1 }, INT_MIN, INT_MAX }, + * { "test_str", "This is a test option of string type.", offsetof(test_struct, str_opt), + * AV_OPT_TYPE_STRING }, + * { "test_bin", "This is a test option of binary type.", offsetof(test_struct, bin_opt), + * AV_OPT_TYPE_BINARY }, + * { NULL }, + * }; + * + * static const AVClass test_class = { + * .class_name = "test class", + * .item_name = av_default_item_name, + * .option = test_options, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * @endcode + * + * Next, when allocating your struct, you must ensure that the AVClass pointer + * is set to the correct value. Then, av_opt_set_defaults() can be called to + * initialize defaults. After that the struct is ready to be used with the + * AVOptions API. + * + * When cleaning up, you may use the av_opt_free() function to automatically + * free all the allocated string and binary options. + * + * Continuing with the above example: + * + * @code + * test_struct *alloc_test_struct(void) + * { + * test_struct *ret = av_malloc(sizeof(*ret)); + * ret->class = &test_class; + * av_opt_set_defaults(ret); + * return ret; + * } + * void free_test_struct(test_struct **foo) + * { + * av_opt_free(*foo); + * av_freep(foo); + * } + * @endcode + * + * @subsection avoptions_implement_nesting Nesting + * It may happen that an AVOptions-enabled struct contains another + * AVOptions-enabled struct as a member (e.g. AVCodecContext in + * libavcodec exports generic options, while its priv_data field exports + * codec-specific options). In such a case, it is possible to set up the + * parent struct to export a child's options. 
To do that, simply + * implement AVClass.child_next() and AVClass.child_class_next() in the + * parent struct's AVClass. + * Assuming that the test_struct from above now also contains a + * child_struct field: + * + * @code + * typedef struct child_struct { + * AVClass *class; + * int flags_opt; + * } child_struct; + * static const AVOption child_opts[] = { + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX }, + * { NULL }, + * }; + * static const AVClass child_class = { + * .class_name = "child class", + * .item_name = av_default_item_name, + * .option = child_opts, + * .version = LIBAVUTIL_VERSION_INT, + * }; + * + * void *child_next(void *obj, void *prev) + * { + * test_struct *t = obj; + * if (!prev && t->child_struct) + * return t->child_struct; + * return NULL + * } + * const AVClass child_class_next(const AVClass *prev) + * { + * return prev ? NULL : &child_class; + * } + * @endcode + * Putting child_next() and child_class_next() as defined above into + * test_class will now make child_struct's options accessible through + * test_struct (again, proper setup as described above needs to be done on + * child_struct right after it is created). + * + * From the above example it might not be clear why both child_next() + * and child_class_next() are needed. The distinction is that child_next() + * iterates over actually existing objects, while child_class_next() + * iterates over all possible child classes. E.g. if an AVCodecContext + * was initialized to use a codec which has private options, then its + * child_next() will return AVCodecContext.priv_data and finish + * iterating. OTOH child_class_next() on AVCodecContext.av_class will + * iterate over all available codecs with private options. + * + * @subsection avoptions_implement_named_constants Named constants + * It is possible to create named constants for options. 
Simply set the unit + * field of the option the constants should apply to a string and + * create the constants themselves as options of type AV_OPT_TYPE_CONST + * with their unit field set to the same string. + * Their default_val field should contain the value of the named + * constant. + * For example, to add some named constants for the test_flags option + * above, put the following into the child_opts array: + * @code + * { "test_flags", "This is a test option of flags type.", + * offsetof(child_struct, flags_opt), AV_OPT_TYPE_FLAGS, { .i64 = 0 }, INT_MIN, INT_MAX, "test_unit" }, + * { "flag1", "This is a flag with value 16", 0, AV_OPT_TYPE_CONST, { .i64 = 16 }, 0, 0, "test_unit" }, + * @endcode + * + * @section avoptions_use Using AVOptions + * This section deals with accessing options in an AVOptions-enabled struct. + * Such structs in FFmpeg are e.g. AVCodecContext in libavcodec or + * AVFormatContext in libavformat. + * + * @subsection avoptions_use_examine Examining AVOptions + * The basic functions for examining options are av_opt_next(), which iterates + * over all options defined for one object, and av_opt_find(), which searches + * for an option with the given name. + * + * The situation is more complicated with nesting. An AVOptions-enabled struct + * may have AVOptions-enabled children. Passing the AV_OPT_SEARCH_CHILDREN flag + * to av_opt_find() will make the function search children recursively. + * + * For enumerating there are basically two cases. The first is when you want to + * get all options that may potentially exist on the struct and its children + * (e.g. when constructing documentation). In that case you should call + * av_opt_child_class_next() recursively on the parent struct's AVClass. The + * second case is when you have an already initialized struct with all its + * children and you want to get all options that can be actually written or read + * from it. 
In that case you should call av_opt_child_next() recursively (and + * av_opt_next() on each result). + * + * @subsection avoptions_use_get_set Reading and writing AVOptions + * When setting options, you often have a string read directly from the + * user. In such a case, simply passing it to av_opt_set() is enough. For + * non-string type options, av_opt_set() will parse the string according to the + * option type. + * + * Similarly av_opt_get() will read any option type and convert it to a string + * which will be returned. Do not forget that the string is allocated, so you + * have to free it with av_free(). + * + * In some cases it may be more convenient to put all options into an + * AVDictionary and call av_opt_set_dict() on it. A specific case of this + * are the format/codec open functions in lavf/lavc which take a dictionary + * filled with option as a parameter. This makes it possible to set some options + * that cannot be set otherwise, since e.g. the input file format is not known + * before the file is actually opened. 
+ */ + +enum AVOptionType{ + AV_OPT_TYPE_FLAGS, + AV_OPT_TYPE_INT, + AV_OPT_TYPE_INT64, + AV_OPT_TYPE_DOUBLE, + AV_OPT_TYPE_FLOAT, + AV_OPT_TYPE_STRING, + AV_OPT_TYPE_RATIONAL, + AV_OPT_TYPE_BINARY, ///< offset must point to a pointer immediately followed by an int for the length + AV_OPT_TYPE_DICT, + AV_OPT_TYPE_CONST = 128, + AV_OPT_TYPE_IMAGE_SIZE = MKBETAG('S','I','Z','E'), ///< offset must point to two consecutive integers + AV_OPT_TYPE_PIXEL_FMT = MKBETAG('P','F','M','T'), + AV_OPT_TYPE_SAMPLE_FMT = MKBETAG('S','F','M','T'), + AV_OPT_TYPE_VIDEO_RATE = MKBETAG('V','R','A','T'), ///< offset must point to AVRational + AV_OPT_TYPE_DURATION = MKBETAG('D','U','R',' '), + AV_OPT_TYPE_COLOR = MKBETAG('C','O','L','R'), + AV_OPT_TYPE_CHANNEL_LAYOUT = MKBETAG('C','H','L','A'), + AV_OPT_TYPE_BOOL = MKBETAG('B','O','O','L'), +}; + +/** + * AVOption + */ +typedef struct AVOption { + const char *name; + + /** + * short English help text + * @todo What about other languages? + */ + const char *help; + + /** + * The offset relative to the context structure where the option + * value is stored. It should be 0 for named constants. + */ + int offset; + enum AVOptionType type; + + /** + * the default value for scalar options + */ + union { + int64_t i64; + double dbl; + const char *str; + /* TODO those are unused now */ + AVRational q; + } default_val; + double min; ///< minimum valid value for the option + double max; ///< maximum valid value for the option + + int flags; +#define AV_OPT_FLAG_ENCODING_PARAM 1 ///< a generic parameter which can be set by the user for muxing or encoding +#define AV_OPT_FLAG_DECODING_PARAM 2 ///< a generic parameter which can be set by the user for demuxing or decoding +#if FF_API_OPT_TYPE_METADATA +#define AV_OPT_FLAG_METADATA 4 ///< some data extracted or inserted into the file like title, comment, ... 
+#endif +#define AV_OPT_FLAG_AUDIO_PARAM 8 +#define AV_OPT_FLAG_VIDEO_PARAM 16 +#define AV_OPT_FLAG_SUBTITLE_PARAM 32 +/** + * The option is intended for exporting values to the caller. + */ +#define AV_OPT_FLAG_EXPORT 64 +/** + * The option may not be set through the AVOptions API, only read. + * This flag only makes sense when AV_OPT_FLAG_EXPORT is also set. + */ +#define AV_OPT_FLAG_READONLY 128 +#define AV_OPT_FLAG_FILTERING_PARAM (1<<16) ///< a generic parameter which can be set by the user for filtering +//FIXME think about enc-audio, ... style flags + + /** + * The logical unit to which the option belongs. Non-constant + * options and corresponding named constants share the same + * unit. May be NULL. + */ + const char *unit; +} AVOption; + +/** + * A single allowed range of values, or a single allowed value. + */ +typedef struct AVOptionRange { + const char *str; + /** + * Value range. + * For string ranges this represents the min/max length. + * For dimensions this represents the min/max pixel count or width/height in multi-component case. + */ + double value_min, value_max; + /** + * Value's component range. + * For string this represents the unicode range for chars, 0-127 limits to ASCII. + */ + double component_min, component_max; + /** + * Range flag. + * If set to 1 the struct encodes a range, if set to 0 a single value. + */ + int is_range; +} AVOptionRange; + +/** + * List of AVOptionRange structs. + */ +typedef struct AVOptionRanges { + /** + * Array of option ranges. + * + * Most of option types use just one component. + * Following describes multi-component option types: + * + * AV_OPT_TYPE_IMAGE_SIZE: + * component index 0: range of pixel count (width * height). + * component index 1: range of width. + * component index 2: range of height. + * + * @note To obtain multi-component version of this structure, user must + * provide AV_OPT_MULTI_COMPONENT_RANGE to av_opt_query_ranges or + * av_opt_query_ranges_default function. 
+ * + * Multi-component range can be read as in following example: + * + * @code + * int range_index, component_index; + * AVOptionRanges *ranges; + * AVOptionRange *range[3]; //may require more than 3 in the future. + * av_opt_query_ranges(&ranges, obj, key, AV_OPT_MULTI_COMPONENT_RANGE); + * for (range_index = 0; range_index < ranges->nb_ranges; range_index++) { + * for (component_index = 0; component_index < ranges->nb_components; component_index++) + * range[component_index] = ranges->range[ranges->nb_ranges * component_index + range_index]; + * //do something with range here. + * } + * av_opt_freep_ranges(&ranges); + * @endcode + */ + AVOptionRange **range; + /** + * Number of ranges per component. + */ + int nb_ranges; + /** + * Number of componentes. + */ + int nb_components; +} AVOptionRanges; + +/** + * Show the obj options. + * + * @param req_flags requested flags for the options to show. Show only the + * options for which it is opt->flags & req_flags. + * @param rej_flags rejected flags for the options to show. Show only the + * options for which it is !(opt->flags & req_flags). + * @param av_log_obj log context to use for showing the options + */ +int av_opt_show2(void *obj, void *av_log_obj, int req_flags, int rej_flags); + +/** + * Set the values of all AVOption fields to their default values. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + */ +void av_opt_set_defaults(void *s); + +/** + * Set the values of all AVOption fields to their default values. Only these + * AVOption fields for which (opt->flags & mask) == flags will have their + * default applied to s. + * + * @param s an AVOption-enabled struct (its first member must be a pointer to AVClass) + * @param mask combination of AV_OPT_FLAG_* + * @param flags combination of AV_OPT_FLAG_* + */ +void av_opt_set_defaults2(void *s, int mask, int flags); + +/** + * Parse the key/value pairs list in opts. 
For each key/value pair + * found, stores the value in the field in ctx that is named like the + * key. ctx must be an AVClass context, storing is done using + * AVOptions. + * + * @param opts options string to parse, may be NULL + * @param key_val_sep a 0-terminated list of characters used to + * separate key from value + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other + * @return the number of successfully set key/value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_opt_set() if a key/value pair + * cannot be set + */ +int av_set_options_string(void *ctx, const char *opts, + const char *key_val_sep, const char *pairs_sep); + +/** + * Parse the key-value pairs list in opts. For each key=value pair found, + * set the value of the corresponding option in ctx. + * + * @param ctx the AVClass object to set options on + * @param opts the options string, key-value pairs separated by a + * delimiter + * @param shorthand a NULL-terminated array of options names for shorthand + * notation: if the first field in opts has no key part, + * the key is taken from the first element of shorthand; + * then again for the second, etc., until either opts is + * finished, shorthand is finished or a named option is + * found; after that, all options must be named + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @return the number of successfully set key=value pairs, or a negative + * value corresponding to an AVERROR code in case of error: + * AVERROR(EINVAL) if opts cannot be parsed, + * the error code issued by av_set_string3() if a key/value pair + * cannot be set + * + * Options names must use only the following characters: a-z A-Z 
0-9 - . / _ + * Separators must use characters distinct from option names and from each + * other. + */ +int av_opt_set_from_string(void *ctx, const char *opts, + const char *const *shorthand, + const char *key_val_sep, const char *pairs_sep); +/** + * Free all allocated objects in obj. + */ +void av_opt_free(void *obj); + +/** + * Check whether a particular flag is set in a flags field. + * + * @param field_name the name of the flag field option + * @param flag_name the name of the flag to check + * @return non-zero if the flag is set, zero if the flag isn't set, + * isn't of the right type, or the flags field doesn't exist. + */ +int av_opt_flag_is_set(void *obj, const char *field_name, const char *flag_name); + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. + * + * @see av_dict_copy() + */ +int av_opt_set_dict(void *obj, struct AVDictionary **options); + + +/** + * Set all the options from a given dictionary on an object. + * + * @param obj a struct whose first element is a pointer to AVClass + * @param options options to process. This dictionary will be freed and replaced + * by a new one containing all options not found in obj. + * Of course this new dictionary needs to be freed by caller + * with av_dict_free(). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * + * @return 0 on success, a negative AVERROR if some option was found in obj, + * but could not be set. 
+ * + * @see av_dict_copy() + */ +int av_opt_set_dict2(void *obj, struct AVDictionary **options, int search_flags); + +/** + * Extract a key-value pair from the beginning of a string. + * + * @param ropts pointer to the options string, will be updated to + * point to the rest of the string (one of the pairs_sep + * or the final NUL) + * @param key_val_sep a 0-terminated list of characters used to separate + * key from value, for example '=' + * @param pairs_sep a 0-terminated list of characters used to separate + * two pairs from each other, for example ':' or ',' + * @param flags flags; see the AV_OPT_FLAG_* values below + * @param rkey parsed key; must be freed using av_free() + * @param rval parsed value; must be freed using av_free() + * + * @return >=0 for success, or a negative value corresponding to an + * AVERROR code in case of error; in particular: + * AVERROR(EINVAL) if no key is present + * + */ +int av_opt_get_key_value(const char **ropts, + const char *key_val_sep, const char *pairs_sep, + unsigned flags, + char **rkey, char **rval); + +enum { + + /** + * Accept to parse a value without a key; the key will then be returned + * as NULL. + */ + AV_OPT_FLAG_IMPLICIT_KEY = 1, +}; + +/** + * @defgroup opt_eval_funcs Evaluating option strings + * @{ + * This group of functions can be used to evaluate option strings + * and get numbers out of them. They do the same thing as av_opt_set(), + * except the result is written into the caller-supplied pointer. + * + * @param obj a struct whose first element is a pointer to AVClass. + * @param o an option for which the string is to be evaluated. + * @param val string to be evaluated. + * @param *_out value of the string will be written here. + * + * @return 0 on success, a negative number on failure. 
+ */ +int av_opt_eval_flags (void *obj, const AVOption *o, const char *val, int *flags_out); +int av_opt_eval_int (void *obj, const AVOption *o, const char *val, int *int_out); +int av_opt_eval_int64 (void *obj, const AVOption *o, const char *val, int64_t *int64_out); +int av_opt_eval_float (void *obj, const AVOption *o, const char *val, float *float_out); +int av_opt_eval_double(void *obj, const AVOption *o, const char *val, double *double_out); +int av_opt_eval_q (void *obj, const AVOption *o, const char *val, AVRational *q_out); +/** + * @} + */ + +#define AV_OPT_SEARCH_CHILDREN (1 << 0) /**< Search in possible children of the + given object first. */ +/** + * The obj passed to av_opt_find() is fake -- only a double pointer to AVClass + * instead of a required pointer to a struct containing AVClass. This is + * useful for searching for options without needing to allocate the corresponding + * object. + */ +#define AV_OPT_SEARCH_FAKE_OBJ (1 << 1) + +/** + * In av_opt_get, return NULL if the option has a pointer type and is set to NULL, + * rather than returning an empty string. + */ +#define AV_OPT_ALLOW_NULL (1 << 2) + +/** + * Allows av_opt_query_ranges and av_opt_query_ranges_default to return more than + * one component for certain option types. + * @see AVOptionRanges for details. + */ +#define AV_OPT_MULTI_COMPONENT_RANGE (1 << 12) + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. 
+ * + * @return A pointer to the option found, or NULL if no option + * was found. + * + * @note Options found with AV_OPT_SEARCH_CHILDREN flag may not be settable + * directly with av_opt_set(). Use special calls which take an options + * AVDictionary (e.g. avformat_open_input()) to set options found with this + * flag. + */ +const AVOption *av_opt_find(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags); + +/** + * Look for an option in an object. Consider only options which + * have all the specified flags set. + * + * @param[in] obj A pointer to a struct whose first element is a + * pointer to an AVClass. + * Alternatively a double pointer to an AVClass, if + * AV_OPT_SEARCH_FAKE_OBJ search flag is set. + * @param[in] name The name of the option to look for. + * @param[in] unit When searching for named constants, name of the unit + * it belongs to. + * @param opt_flags Find only options with all the specified flags set (AV_OPT_FLAG). + * @param search_flags A combination of AV_OPT_SEARCH_*. + * @param[out] target_obj if non-NULL, an object to which the option belongs will be + * written here. It may be different from obj if AV_OPT_SEARCH_CHILDREN is present + * in search_flags. This parameter is ignored if search_flags contain + * AV_OPT_SEARCH_FAKE_OBJ. + * + * @return A pointer to the option found, or NULL if no option + * was found. + */ +const AVOption *av_opt_find2(void *obj, const char *name, const char *unit, + int opt_flags, int search_flags, void **target_obj); + +/** + * Iterate over all AVOptions belonging to obj. + * + * @param obj an AVOptions-enabled struct or a double pointer to an + * AVClass describing it. + * @param prev result of the previous call to av_opt_next() on this object + * or NULL + * @return next AVOption or NULL + */ +const AVOption *av_opt_next(const void *obj, const AVOption *prev); + +/** + * Iterate over AVOptions-enabled children of obj. 
+ * + * @param prev result of a previous call to this function or NULL + * @return next AVOptions-enabled child or NULL + */ +void *av_opt_child_next(void *obj, void *prev); + +/** + * Iterate over potential AVOptions-enabled children of parent. + * + * @param prev result of a previous call to this function or NULL + * @return AVClass corresponding to next potential child or NULL + */ +const AVClass *av_opt_child_class_next(const AVClass *parent, const AVClass *prev); + +/** + * @defgroup opt_set_funcs Option setting functions + * @{ + * Those functions set the field of obj with the given name to value. + * + * @param[in] obj A struct whose first element is a pointer to an AVClass. + * @param[in] name the name of the field to set + * @param[in] val The value to set. In case of av_opt_set() if the field is not + * of a string type, then the given string is parsed. + * SI postfixes and some named scalars are supported. + * If the field is of a numeric type, it has to be a numeric or named + * scalar. Behavior with more than one scalar and +- infix operators + * is undefined. + * If the field is of a flags type, it has to be a sequence of numeric + * scalars or named flags separated by '+' or '-'. Prefixing a flag + * with '+' causes it to be set without affecting the other flags; + * similarly, '-' unsets a flag. + * @param search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be set on a child of obj. 
+ * + * @return 0 if the value has been set, or an AVERROR code in case of + * error: + * AVERROR_OPTION_NOT_FOUND if no matching option exists + * AVERROR(ERANGE) if the value is out of range + * AVERROR(EINVAL) if the value is not valid + */ +int av_opt_set (void *obj, const char *name, const char *val, int search_flags); +int av_opt_set_int (void *obj, const char *name, int64_t val, int search_flags); +int av_opt_set_double (void *obj, const char *name, double val, int search_flags); +int av_opt_set_q (void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_bin (void *obj, const char *name, const uint8_t *val, int size, int search_flags); +int av_opt_set_image_size(void *obj, const char *name, int w, int h, int search_flags); +int av_opt_set_pixel_fmt (void *obj, const char *name, enum AVPixelFormat fmt, int search_flags); +int av_opt_set_sample_fmt(void *obj, const char *name, enum AVSampleFormat fmt, int search_flags); +int av_opt_set_video_rate(void *obj, const char *name, AVRational val, int search_flags); +int av_opt_set_channel_layout(void *obj, const char *name, int64_t ch_layout, int search_flags); +/** + * @note Any old dictionary present is discarded and replaced with a copy of the new one. The + * caller still owns val is and responsible for freeing it. + */ +int av_opt_set_dict_val(void *obj, const char *name, const AVDictionary *val, int search_flags); + +/** + * Set a binary option to an integer list. + * + * @param obj AVClass object to set options on + * @param name name of the binary option + * @param val pointer to an integer list (must have the correct type with + * regard to the contents of the list) + * @param term list terminator (usually 0 or -1) + * @param flags search flags + */ +#define av_opt_set_int_list(obj, name, val, term, flags) \ + (av_int_list_length(val, term) > INT_MAX / sizeof(*(val)) ? 
\ + AVERROR(EINVAL) : \ + av_opt_set_bin(obj, name, (const uint8_t *)(val), \ + av_int_list_length(val, term) * sizeof(*(val)), flags)) + +/** + * @} + */ + +/** + * @defgroup opt_get_funcs Option getting functions + * @{ + * Those functions get a value of the option with the given name from an object. + * + * @param[in] obj a struct whose first element is a pointer to an AVClass. + * @param[in] name name of the option to get. + * @param[in] search_flags flags passed to av_opt_find2. I.e. if AV_OPT_SEARCH_CHILDREN + * is passed here, then the option may be found in a child of obj. + * @param[out] out_val value of the option will be written here + * @return >=0 on success, a negative error code otherwise + */ +/** + * @note the returned string will be av_malloc()ed and must be av_free()ed by the caller + * + * @note if AV_OPT_ALLOW_NULL is set in search_flags in av_opt_get, and the option has + * AV_OPT_TYPE_STRING or AV_OPT_TYPE_BINARY and is set to NULL, *out_val will be set + * to NULL instead of an allocated empty string. 
+ */ +int av_opt_get (void *obj, const char *name, int search_flags, uint8_t **out_val); +int av_opt_get_int (void *obj, const char *name, int search_flags, int64_t *out_val); +int av_opt_get_double (void *obj, const char *name, int search_flags, double *out_val); +int av_opt_get_q (void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_image_size(void *obj, const char *name, int search_flags, int *w_out, int *h_out); +int av_opt_get_pixel_fmt (void *obj, const char *name, int search_flags, enum AVPixelFormat *out_fmt); +int av_opt_get_sample_fmt(void *obj, const char *name, int search_flags, enum AVSampleFormat *out_fmt); +int av_opt_get_video_rate(void *obj, const char *name, int search_flags, AVRational *out_val); +int av_opt_get_channel_layout(void *obj, const char *name, int search_flags, int64_t *ch_layout); +/** + * @param[out] out_val The returned dictionary is a copy of the actual value and must + * be freed with av_dict_free() by the caller + */ +int av_opt_get_dict_val(void *obj, const char *name, int search_flags, AVDictionary **out_val); +/** + * @} + */ +/** + * Gets a pointer to the requested field in a struct. + * This function allows accessing a struct even when its fields are moved or + * renamed since the application making the access has been compiled, + * + * @returns a pointer to the field, it can be cast to the correct type and read + * or written to. + */ +void *av_opt_ptr(const AVClass *avclass, void *obj, const char *name); + +/** + * Free an AVOptionRanges struct and set it to NULL. + */ +void av_opt_freep_ranges(AVOptionRanges **ranges); + +/** + * Get a list of allowed ranges for the given option. + * + * The returned list may depend on other fields in obj like for example profile. 
+ * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges + * + * The result must be freed with av_opt_freep_ranges. + * + * @return number of compontents returned on success, a negative errro code otherwise + */ +int av_opt_query_ranges(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Copy options from src object into dest object. + * + * Options that require memory allocation (e.g. string or binary) are malloc'ed in dest object. + * Original memory allocated for such options is freed unless both src and dest options points to the same memory. + * + * @param dest Object to copy from + * @param src Object to copy into + * @return 0 on success, negative on error + */ +int av_opt_copy(void *dest, const void *src); + +/** + * Get a default list of allowed ranges for the given option. + * + * This list is constructed without using the AVClass.query_ranges() callback + * and can be used as fallback from within the callback. + * + * @param flags is a bitmask of flags, undefined flags should not be set and should be ignored + * AV_OPT_SEARCH_FAKE_OBJ indicates that the obj is a double pointer to a AVClass instead of a full instance + * AV_OPT_MULTI_COMPONENT_RANGE indicates that function may return more than one component, @see AVOptionRanges + * + * The result must be freed with av_opt_free_ranges. + * + * @return number of compontents returned on success, a negative errro code otherwise + */ +int av_opt_query_ranges_default(AVOptionRanges **, void *obj, const char *key, int flags); + +/** + * Check if given option is set to its default value. + * + * Options o must belong to the obj. This function must not be called to check child's options state. 
+ * @see av_opt_is_set_to_default_by_name(). + * + * @param obj AVClass object to check option on + * @param o option to be checked + * @return >0 when option is set to its default, + * 0 when option is not set its default, + * <0 on error + */ +int av_opt_is_set_to_default(void *obj, const AVOption *o); + +/** + * Check if given option is set to its default value. + * + * @param obj AVClass object to check option on + * @param name option name + * @param search_flags combination of AV_OPT_SEARCH_* + * @return >0 when option is set to its default, + * 0 when option is not set its default, + * <0 on error + */ +int av_opt_is_set_to_default_by_name(void *obj, const char *name, int search_flags); + + +#define AV_OPT_SERIALIZE_SKIP_DEFAULTS 0x00000001 ///< Serialize options that are not set to default values only. +#define AV_OPT_SERIALIZE_OPT_FLAGS_EXACT 0x00000002 ///< Serialize options that exactly match opt_flags only. + +/** + * Serialize object's options. + * + * Create a string containing object's serialized options. + * Such string may be passed back to av_opt_set_from_string() in order to restore option values. + * A key/value or pairs separator occurring in the serialized value or + * name string are escaped through the av_escape() function. + * + * @param[in] obj AVClass object to serialize + * @param[in] opt_flags serialize options with all the specified flags set (AV_OPT_FLAG) + * @param[in] flags combination of AV_OPT_SERIALIZE_* flags + * @param[out] buffer Pointer to buffer that will be allocated with string containg serialized options. + * Buffer must be freed by the caller when is no longer needed. + * @param[in] key_val_sep character used to separate key from value + * @param[in] pairs_sep character used to separate two pairs from each other + * @return >= 0 on success, negative on error + * @warning Separators cannot be neither '\\' nor '\0'. They also cannot be the same. 
+ */ +int av_opt_serialize(void *obj, int opt_flags, int flags, char **buffer, + const char key_val_sep, const char pairs_sep); +/** + * @} + */ + +#endif /* AVUTIL_OPT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/parseutils.h b/third-party/FFmpeg-iOS/include/libavutil/parseutils.h new file mode 100644 index 0000000000..e66d24b76e --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/parseutils.h @@ -0,0 +1,193 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PARSEUTILS_H +#define AVUTIL_PARSEUTILS_H + +#include + +#include "rational.h" + +/** + * @file + * misc parsing utilities + */ + +/** + * Parse str and store the parsed ratio in q. + * + * Note that a ratio with infinite (1/0) or negative value is + * considered valid, so you should check on the returned value if you + * want to exclude those values. + * + * The undefined value can be expressed using the "0:0" string. 
+ * + * @param[in,out] q pointer to the AVRational which will contain the ratio + * @param[in] str the string to parse: it has to be a string in the format + * num:den, a float number or an expression + * @param[in] max the maximum allowed numerator and denominator + * @param[in] log_offset log level offset which is applied to the log + * level of log_ctx + * @param[in] log_ctx parent logging context + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_ratio(AVRational *q, const char *str, int max, + int log_offset, void *log_ctx); + +#define av_parse_ratio_quiet(rate, str, max) \ + av_parse_ratio(rate, str, max, AV_LOG_MAX_OFFSET, NULL) + +/** + * Parse str and put in width_ptr and height_ptr the detected values. + * + * @param[in,out] width_ptr pointer to the variable which will contain the detected + * width value + * @param[in,out] height_ptr pointer to the variable which will contain the detected + * height value + * @param[in] str the string to parse: it has to be a string in the format + * width x height or a valid video size abbreviation. + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_size(int *width_ptr, int *height_ptr, const char *str); + +/** + * Parse str and store the detected values in *rate. + * + * @param[in,out] rate pointer to the AVRational which will contain the detected + * frame rate + * @param[in] str the string to parse: it has to be a string in the format + * rate_num / rate_den, a float number or a valid video rate abbreviation + * @return >= 0 on success, a negative error code otherwise + */ +int av_parse_video_rate(AVRational *rate, const char *str); + +/** + * Put the RGBA values that correspond to color_string in rgba_color. + * + * @param color_string a string specifying a color. It can be the name of + * a color (case insensitive match) or a [0x|#]RRGGBB[AA] sequence, + * possibly followed by "@" and a string representing the alpha + * component. 
+ * The alpha component may be a string composed by "0x" followed by an + * hexadecimal number or a decimal number between 0.0 and 1.0, which + * represents the opacity value (0x00/0.0 means completely transparent, + * 0xff/1.0 completely opaque). + * If the alpha component is not specified then 0xff is assumed. + * The string "random" will result in a random color. + * @param slen length of the initial part of color_string containing the + * color. It can be set to -1 if color_string is a null terminated string + * containing nothing else than the color. + * @return >= 0 in case of success, a negative value in case of + * failure (for example if color_string cannot be parsed). + */ +int av_parse_color(uint8_t *rgba_color, const char *color_string, int slen, + void *log_ctx); + +/** + * Get the name of a color from the internal table of hard-coded named + * colors. + * + * This function is meant to enumerate the color names recognized by + * av_parse_color(). + * + * @param color_idx index of the requested color, starting from 0 + * @param rgbp if not NULL, will point to a 3-elements array with the color value in RGB + * @return the color name string or NULL if color_idx is not in the array + */ +const char *av_get_known_color_name(int color_idx, const uint8_t **rgb); + +/** + * Parse timestr and return in *time a corresponding number of + * microseconds. + * + * @param timeval puts here the number of microseconds corresponding + * to the string in timestr. If the string represents a duration, it + * is the number of microseconds contained in the time interval. If + * the string is a date, is the number of microseconds since 1st of + * January, 1970 up to the time of the parsed date. If timestr cannot + * be successfully parsed, set *time to INT64_MIN. + + * @param timestr a string representing a date or a duration. 
+ * - If a date the syntax is: + * @code + * [{YYYY-MM-DD|YYYYMMDD}[T|t| ]]{{HH:MM:SS[.m...]]]}|{HHMMSS[.m...]]]}}[Z] + * now + * @endcode + * If the value is "now" it takes the current time. + * Time is local time unless Z is appended, in which case it is + * interpreted as UTC. + * If the year-month-day part is not specified it takes the current + * year-month-day. + * - If a duration the syntax is: + * @code + * [-][HH:]MM:SS[.m...] + * [-]S+[.m...] + * @endcode + * @param duration flag which tells how to interpret timestr, if not + * zero timestr is interpreted as a duration, otherwise as a date + * @return >= 0 in case of success, a negative value corresponding to an + * AVERROR code otherwise + */ +int av_parse_time(int64_t *timeval, const char *timestr, int duration); + +/** + * Attempt to find a specific tag in a URL. + * + * syntax: '?tag1=val1&tag2=val2...'. Little URL decoding is done. + * Return 1 if found. + */ +int av_find_info_tag(char *arg, int arg_size, const char *tag1, const char *info); + +/** + * Simplified version of strptime + * + * Parse the input string p according to the format string fmt and + * store its results in the structure dt. + * This implementation supports only a subset of the formats supported + * by the standard strptime(). + * + * The supported input field descriptors are listed below. 
+ * - %H: the hour as a decimal number, using a 24-hour clock, in the + * range '00' through '23' + * - %J: hours as a decimal number, in the range '0' through INT_MAX + * - %M: the minute as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %S: the second as a decimal number, using a 24-hour clock, in the + * range '00' through '59' + * - %Y: the year as a decimal number, using the Gregorian calendar + * - %m: the month as a decimal number, in the range '1' through '12' + * - %d: the day of the month as a decimal number, in the range '1' + * through '31' + * - %T: alias for '%H:%M:%S' + * - %%: a literal '%' + * + * @return a pointer to the first character not processed in this function + * call. In case the input string contains more characters than + * required by the format string the return value points right after + * the last consumed input character. In case the whole input string + * is consumed the return value points to the null byte at the end of + * the string. On failure NULL is returned. + */ +char *av_small_strptime(const char *p, const char *fmt, struct tm *dt); + +/** + * Convert the decomposed UTC time in tm to a time_t value. + */ +time_t av_timegm(struct tm *tm); + +#endif /* AVUTIL_PARSEUTILS_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h b/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h new file mode 100644 index 0000000000..3b0bcdb3d8 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/pixdesc.h @@ -0,0 +1,394 @@ +/* + * pixel format descriptor + * Copyright (c) 2009 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXDESC_H +#define AVUTIL_PIXDESC_H + +#include + +#include "attributes.h" +#include "pixfmt.h" +#include "version.h" + +typedef struct AVComponentDescriptor { + /** + * Which of the 4 planes contains the component. + */ + int plane; + + /** + * Number of elements between 2 horizontally consecutive pixels. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int step; + + /** + * Number of elements before the component of the first pixel. + * Elements are bits for bitstream formats, bytes otherwise. + */ + int offset; + + /** + * Number of least significant bits that must be shifted away + * to get the value. + */ + int shift; + + /** + * Number of bits in the component. + */ + int depth; + +#if FF_API_PLUS1_MINUS1 + /** deprecated, use step instead */ + attribute_deprecated int step_minus1; + + /** deprecated, use depth instead */ + attribute_deprecated int depth_minus1; + + /** deprecated, use offset instead */ + attribute_deprecated int offset_plus1; +#endif +} AVComponentDescriptor; + +/** + * Descriptor that unambiguously describes how the bits of a pixel are + * stored in the up to 4 data planes of an image. It also stores the + * subsampling factors and number of components. + * + * @note This is separate of the colorspace (RGB, YCbCr, YPbPr, JPEG-style YUV + * and all the YUV variants) AVPixFmtDescriptor just stores how values + * are stored not what these values represent. 
+ */ +typedef struct AVPixFmtDescriptor { + const char *name; + uint8_t nb_components; ///< The number of components each pixel has, (1-4) + + /** + * Amount to shift the luma width right to find the chroma width. + * For YV12 this is 1 for example. + * chroma_width = AV_CEIL_RSHIFT(luma_width, log2_chroma_w) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_w; + + /** + * Amount to shift the luma height right to find the chroma height. + * For YV12 this is 1 for example. + * chroma_height= AV_CEIL_RSHIFT(luma_height, log2_chroma_h) + * The note above is needed to ensure rounding up. + * This value only refers to the chroma components. + */ + uint8_t log2_chroma_h; + + /** + * Combination of AV_PIX_FMT_FLAG_... flags. + */ + uint64_t flags; + + /** + * Parameters that describe how pixels are packed. + * If the format has 1 or 2 components, then luma is 0. + * If the format has 3 or 4 components: + * if the RGB flag is set then 0 is red, 1 is green and 2 is blue; + * otherwise 0 is luma, 1 is chroma-U and 2 is chroma-V. + * + * If present, the Alpha channel is always the last component. + */ + AVComponentDescriptor comp[4]; + + /** + * Alternative comma-separated names. + */ + const char *alias; +} AVPixFmtDescriptor; + +/** + * Pixel format is big-endian. + */ +#define AV_PIX_FMT_FLAG_BE (1 << 0) +/** + * Pixel format has a palette in data[1], values are indexes in this palette. + */ +#define AV_PIX_FMT_FLAG_PAL (1 << 1) +/** + * All values of a component are bit-wise packed end to end. + */ +#define AV_PIX_FMT_FLAG_BITSTREAM (1 << 2) +/** + * Pixel format is an HW accelerated format. + */ +#define AV_PIX_FMT_FLAG_HWACCEL (1 << 3) +/** + * At least one pixel component is not in the first data plane. + */ +#define AV_PIX_FMT_FLAG_PLANAR (1 << 4) +/** + * The pixel format contains RGB-like data (as opposed to YUV/grayscale). 
+ */ +#define AV_PIX_FMT_FLAG_RGB (1 << 5) + +/** + * The pixel format is "pseudo-paletted". This means that it contains a + * fixed palette in the 2nd plane but the palette is fixed/constant for each + * PIX_FMT. This allows interpreting the data as if it was PAL8, which can + * in some cases be simpler. Or the data can be interpreted purely based on + * the pixel format without using the palette. + * An example of a pseudo-paletted format is AV_PIX_FMT_GRAY8 + */ +#define AV_PIX_FMT_FLAG_PSEUDOPAL (1 << 6) + +/** + * The pixel format has an alpha channel. This is set on all formats that + * support alpha in some way. The exception is AV_PIX_FMT_PAL8, which can + * carry alpha as part of the palette. Details are explained in the + * AVPixelFormat enum, and are also encoded in the corresponding + * AVPixFmtDescriptor. + * + * The alpha is always straight, never pre-multiplied. + * + * If a codec or a filter does not support alpha, it should set all alpha to + * opaque, or use the equivalent pixel formats without alpha component, e.g. + * AV_PIX_FMT_RGB0 (or AV_PIX_FMT_RGB24 etc.) instead of AV_PIX_FMT_RGBA. + */ +#define AV_PIX_FMT_FLAG_ALPHA (1 << 7) + +/** + * Read a line from an image, and write the values of the + * pixel format component c to dst. + * + * @param data the array containing the pointers to the planes of the image + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to read + * @param y the vertical coordinate of the first pixel to read + * @param w the width of the line to read, that is the number of + * values to write to dst + * @param read_pal_component if not zero and the format is a paletted + * format writes the values corresponding to the palette + * component c in data[1] to dst, rather than the palette indexes in + * data[0]. The behavior is undefined if the format is not paletted. 
+ */ +void av_read_image_line(uint16_t *dst, const uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w, int read_pal_component); + +/** + * Write the values from src to the pixel format component c of an + * image line. + * + * @param src array containing the values to write + * @param data the array containing the pointers to the planes of the + * image to write into. It is supposed to be zeroed. + * @param linesize the array containing the linesizes of the image + * @param desc the pixel format descriptor for the image + * @param x the horizontal coordinate of the first pixel to write + * @param y the vertical coordinate of the first pixel to write + * @param w the width of the line to write, that is the number of + * values to write to the image line + */ +void av_write_image_line(const uint16_t *src, uint8_t *data[4], + const int linesize[4], const AVPixFmtDescriptor *desc, + int x, int y, int c, int w); + +/** + * Return the pixel format corresponding to name. + * + * If there is no pixel format with name name, then looks for a + * pixel format with the name corresponding to the native endian + * format of name. + * For example in a little-endian system, first looks for "gray16", + * then for "gray16le". + * + * Finally if no pixel format has been found, returns AV_PIX_FMT_NONE. + */ +enum AVPixelFormat av_get_pix_fmt(const char *name); + +/** + * Return the short name for a pixel format, NULL in case pix_fmt is + * unknown. + * + * @see av_get_pix_fmt(), av_get_pix_fmt_string() + */ +const char *av_get_pix_fmt_name(enum AVPixelFormat pix_fmt); + +/** + * Print in buf the string corresponding to the pixel format with + * number pix_fmt, or a header if pix_fmt is negative. 
+ * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param pix_fmt the number of the pixel format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + */ +char *av_get_pix_fmt_string(char *buf, int buf_size, + enum AVPixelFormat pix_fmt); + +/** + * Return the number of bits per pixel used by the pixel format + * described by pixdesc. Note that this is not the same as the number + * of bits per sample. + * + * The returned number of bits refers to the number of bits actually + * used for storing the pixel information, that is padding bits are + * not counted. + */ +int av_get_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * Return the number of bits per pixel for the pixel format + * described by pixdesc, including any padding or unused bits. + */ +int av_get_padded_bits_per_pixel(const AVPixFmtDescriptor *pixdesc); + +/** + * @return a pixel format descriptor for provided pixel format or NULL if + * this pixel format is unknown. + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_get(enum AVPixelFormat pix_fmt); + +/** + * Iterate over all pixel format descriptors known to libavutil. + * + * @param prev previous descriptor. NULL to get the first descriptor. + * + * @return next descriptor or NULL after the last descriptor + */ +const AVPixFmtDescriptor *av_pix_fmt_desc_next(const AVPixFmtDescriptor *prev); + +/** + * @return an AVPixelFormat id described by desc, or AV_PIX_FMT_NONE if desc + * is not a valid pointer to a pixel format descriptor. + */ +enum AVPixelFormat av_pix_fmt_desc_get_id(const AVPixFmtDescriptor *desc); + +/** + * Utility function to access log2_chroma_w log2_chroma_h from + * the pixel format AVPixFmtDescriptor. + * + * See av_get_chroma_sub_sample() for a function that asserts a + * valid pixel format instead of returning an error code. 
+ * Its recommended that you use avcodec_get_chroma_sub_sample unless + * you do check the return code! + * + * @param[in] pix_fmt the pixel format + * @param[out] h_shift store log2_chroma_w (horizontal/width shift) + * @param[out] v_shift store log2_chroma_h (vertical/height shift) + * + * @return 0 on success, AVERROR(ENOSYS) on invalid or unknown pixel format + */ +int av_pix_fmt_get_chroma_sub_sample(enum AVPixelFormat pix_fmt, + int *h_shift, int *v_shift); + +/** + * @return number of planes in pix_fmt, a negative AVERROR if pix_fmt is not a + * valid pixel format. + */ +int av_pix_fmt_count_planes(enum AVPixelFormat pix_fmt); + +/** + * Utility function to swap the endianness of a pixel format. + * + * @param[in] pix_fmt the pixel format + * + * @return pixel format with swapped endianness if it exists, + * otherwise AV_PIX_FMT_NONE + */ +enum AVPixelFormat av_pix_fmt_swap_endianness(enum AVPixelFormat pix_fmt); + +#define FF_LOSS_RESOLUTION 0x0001 /**< loss due to resolution change */ +#define FF_LOSS_DEPTH 0x0002 /**< loss due to color depth change */ +#define FF_LOSS_COLORSPACE 0x0004 /**< loss due to color space conversion */ +#define FF_LOSS_ALPHA 0x0008 /**< loss of alpha bits */ +#define FF_LOSS_COLORQUANT 0x0010 /**< loss due to color quantization */ +#define FF_LOSS_CHROMA 0x0020 /**< loss of chroma (e.g. RGB to gray conversion) */ + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. 
+ * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +int av_get_pix_fmt_loss(enum AVPixelFormat dst_pix_fmt, + enum AVPixelFormat src_pix_fmt, + int has_alpha); + +/** + * Compute what kind of losses will occur when converting from one specific + * pixel format to another. + * When converting from one pixel format to another, information loss may occur. + * For example, when converting from RGB24 to GRAY, the color information will + * be lost. Similarly, other losses occur when converting from some formats to + * other formats. These losses can involve loss of chroma, but also loss of + * resolution, loss of color depth, loss due to the color space conversion, loss + * of the alpha bits or loss due to color quantization. + * av_get_fix_fmt_loss() informs you about the various types of losses + * which will occur when converting from one pixel format to another. + * + * @param[in] dst_pix_fmt destination pixel format + * @param[in] src_pix_fmt source pixel format + * @param[in] has_alpha Whether the source pixel format alpha channel is used. + * @return Combination of flags informing you what kind of losses will occur + * (maximum loss for an invalid dst_pix_fmt). + */ +enum AVPixelFormat av_find_best_pix_fmt_of_2(enum AVPixelFormat dst_pix_fmt1, enum AVPixelFormat dst_pix_fmt2, + enum AVPixelFormat src_pix_fmt, int has_alpha, int *loss_ptr); + +/** + * @return the name for provided color range or NULL if unknown. + */ +const char *av_color_range_name(enum AVColorRange range); + +/** + * @return the name for provided color primaries or NULL if unknown. 
+ */ +const char *av_color_primaries_name(enum AVColorPrimaries primaries); + +/** + * @return the name for provided color transfer or NULL if unknown. + */ +const char *av_color_transfer_name(enum AVColorTransferCharacteristic transfer); + +/** + * @return the name for provided color space or NULL if unknown. + */ +const char *av_color_space_name(enum AVColorSpace space); + +/** + * @return the name for provided chroma location or NULL if unknown. + */ +const char *av_chroma_location_name(enum AVChromaLocation location); + +#endif /* AVUTIL_PIXDESC_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/pixelutils.h b/third-party/FFmpeg-iOS/include/libavutil/pixelutils.h new file mode 100644 index 0000000000..a8dbc157e1 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/pixelutils.h @@ -0,0 +1,52 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXELUTILS_H +#define AVUTIL_PIXELUTILS_H + +#include +#include +#include "common.h" + +/** + * Sum of abs(src1[x] - src2[x]) + */ +typedef int (*av_pixelutils_sad_fn)(const uint8_t *src1, ptrdiff_t stride1, + const uint8_t *src2, ptrdiff_t stride2); + +/** + * Get a potentially optimized pointer to a Sum-of-absolute-differences + * function (see the av_pixelutils_sad_fn prototype). + * + * @param w_bits 1< + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_PIXFMT_H +#define AVUTIL_PIXFMT_H + +/** + * @file + * pixel format definitions + */ + +#include "../libavutil/avconfig.h" +#include "version.h" + +#define AVPALETTE_SIZE 1024 +#define AVPALETTE_COUNT 256 + +/** + * Pixel format. + * + * @note + * AV_PIX_FMT_RGB32 is handled in an endian-specific manner. An RGBA + * color is put together as: + * (A << 24) | (R << 16) | (G << 8) | B + * This is stored as BGRA on little-endian CPU architectures and ARGB on + * big-endian CPUs. 
+ * + * @par + * When the pixel format is palettized RGB32 (AV_PIX_FMT_PAL8), the palettized + * image data is stored in AVFrame.data[0]. The palette is transported in + * AVFrame.data[1], is 1024 bytes long (256 4-byte entries) and is + * formatted the same as in AV_PIX_FMT_RGB32 described above (i.e., it is + * also endian-specific). Note also that the individual RGB32 palette + * components stored in AVFrame.data[1] should be in the range 0..255. + * This is important as many custom PAL8 video codecs that were designed + * to run on the IBM VGA graphics adapter use 6-bit palette components. + * + * @par + * For all the 8 bits per pixel formats, an RGB32 palette is in data[1] like + * for pal8. This palette is filled in automatically by the function + * allocating the picture. + */ +enum AVPixelFormat { + AV_PIX_FMT_NONE = -1, + AV_PIX_FMT_YUV420P, ///< planar YUV 4:2:0, 12bpp, (1 Cr & Cb sample per 2x2 Y samples) + AV_PIX_FMT_YUYV422, ///< packed YUV 4:2:2, 16bpp, Y0 Cb Y1 Cr + AV_PIX_FMT_RGB24, ///< packed RGB 8:8:8, 24bpp, RGBRGB... + AV_PIX_FMT_BGR24, ///< packed RGB 8:8:8, 24bpp, BGRBGR... 
+ AV_PIX_FMT_YUV422P, ///< planar YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_YUV444P, ///< planar YUV 4:4:4, 24bpp, (1 Cr & Cb sample per 1x1 Y samples) + AV_PIX_FMT_YUV410P, ///< planar YUV 4:1:0, 9bpp, (1 Cr & Cb sample per 4x4 Y samples) + AV_PIX_FMT_YUV411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) + AV_PIX_FMT_GRAY8, ///< Y , 8bpp + AV_PIX_FMT_MONOWHITE, ///< Y , 1bpp, 0 is white, 1 is black, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_MONOBLACK, ///< Y , 1bpp, 0 is black, 1 is white, in each byte pixels are ordered from the msb to the lsb + AV_PIX_FMT_PAL8, ///< 8 bits with AV_PIX_FMT_RGB32 palette + AV_PIX_FMT_YUVJ420P, ///< planar YUV 4:2:0, 12bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV420P and setting color_range + AV_PIX_FMT_YUVJ422P, ///< planar YUV 4:2:2, 16bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV422P and setting color_range + AV_PIX_FMT_YUVJ444P, ///< planar YUV 4:4:4, 24bpp, full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV444P and setting color_range +#if FF_API_XVMC + AV_PIX_FMT_XVMC_MPEG2_MC,///< XVideo Motion Acceleration via common packet passing + AV_PIX_FMT_XVMC_MPEG2_IDCT, +#define AV_PIX_FMT_XVMC AV_PIX_FMT_XVMC_MPEG2_IDCT +#endif /* FF_API_XVMC */ + AV_PIX_FMT_UYVY422, ///< packed YUV 4:2:2, 16bpp, Cb Y0 Cr Y1 + AV_PIX_FMT_UYYVYY411, ///< packed YUV 4:1:1, 12bpp, Cb Y0 Y1 Cr Y2 Y3 + AV_PIX_FMT_BGR8, ///< packed RGB 3:3:2, 8bpp, (msb)2B 3G 3R(lsb) + AV_PIX_FMT_BGR4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1B 2G 1R(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + AV_PIX_FMT_BGR4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1B 2G 1R(lsb) + AV_PIX_FMT_RGB8, ///< packed RGB 3:3:2, 8bpp, (msb)2R 3G 3B(lsb) + AV_PIX_FMT_RGB4, ///< packed RGB 1:2:1 bitstream, 4bpp, (msb)1R 2G 1B(lsb), a byte contains two pixels, the first pixel in the byte is the one composed by the 4 msb bits + 
AV_PIX_FMT_RGB4_BYTE, ///< packed RGB 1:2:1, 8bpp, (msb)1R 2G 1B(lsb) + AV_PIX_FMT_NV12, ///< planar YUV 4:2:0, 12bpp, 1 plane for Y and 1 plane for the UV components, which are interleaved (first byte U and the following byte V) + AV_PIX_FMT_NV21, ///< as above, but U and V bytes are swapped + + AV_PIX_FMT_ARGB, ///< packed ARGB 8:8:8:8, 32bpp, ARGBARGB... + AV_PIX_FMT_RGBA, ///< packed RGBA 8:8:8:8, 32bpp, RGBARGBA... + AV_PIX_FMT_ABGR, ///< packed ABGR 8:8:8:8, 32bpp, ABGRABGR... + AV_PIX_FMT_BGRA, ///< packed BGRA 8:8:8:8, 32bpp, BGRABGRA... + + AV_PIX_FMT_GRAY16BE, ///< Y , 16bpp, big-endian + AV_PIX_FMT_GRAY16LE, ///< Y , 16bpp, little-endian + AV_PIX_FMT_YUV440P, ///< planar YUV 4:4:0 (1 Cr & Cb sample per 1x2 Y samples) + AV_PIX_FMT_YUVJ440P, ///< planar YUV 4:4:0 full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV440P and setting color_range + AV_PIX_FMT_YUVA420P, ///< planar YUV 4:2:0, 20bpp, (1 Cr & Cb sample per 2x2 Y & A samples) +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_H264,///< H.264 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG1,///< MPEG-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_MPEG2,///< MPEG-2 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_WMV3,///< WMV3 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers + AV_PIX_FMT_VDPAU_VC1, ///< VC-1 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_RGB48BE, ///< 
packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_RGB48LE, ///< packed RGB 16:16:16, 48bpp, 16R, 16G, 16B, the 2-byte value for each R/G/B component is stored as little-endian + + AV_PIX_FMT_RGB565BE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), big-endian + AV_PIX_FMT_RGB565LE, ///< packed RGB 5:6:5, 16bpp, (msb) 5R 6G 5B(lsb), little-endian + AV_PIX_FMT_RGB555BE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_RGB555LE, ///< packed RGB 5:5:5, 16bpp, (msb)1X 5R 5G 5B(lsb), little-endian, X=unused/undefined + + AV_PIX_FMT_BGR565BE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), big-endian + AV_PIX_FMT_BGR565LE, ///< packed BGR 5:6:5, 16bpp, (msb) 5B 6G 5R(lsb), little-endian + AV_PIX_FMT_BGR555BE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), big-endian , X=unused/undefined + AV_PIX_FMT_BGR555LE, ///< packed BGR 5:5:5, 16bpp, (msb)1X 5B 5G 5R(lsb), little-endian, X=unused/undefined + +#if FF_API_VAAPI + /** @name Deprecated pixel formats */ + /**@{*/ + AV_PIX_FMT_VAAPI_MOCO, ///< HW acceleration through VA API at motion compensation entry-point, Picture.data[3] contains a vaapi_render_state struct which contains macroblocks as well as various fields extracted from headers + AV_PIX_FMT_VAAPI_IDCT, ///< HW acceleration through VA API at IDCT entry-point, Picture.data[3] contains a vaapi_render_state struct which contains fields extracted from headers + AV_PIX_FMT_VAAPI_VLD, ///< HW decoding through VA API, Picture.data[3] contains a VASurfaceID + /**@}*/ + AV_PIX_FMT_VAAPI = AV_PIX_FMT_VAAPI_VLD, +#else + /** + * Hardware acceleration through VA-API, data[3] contains a + * VASurfaceID. 
+ */ + AV_PIX_FMT_VAAPI, +#endif + + AV_PIX_FMT_YUV420P16LE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P16BE, ///< planar YUV 4:2:0, 24bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV422P16LE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P16BE, ///< planar YUV 4:2:2, 32bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV444P16LE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P16BE, ///< planar YUV 4:4:4, 48bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian +#if FF_API_VDPAU + AV_PIX_FMT_VDPAU_MPEG4, ///< MPEG-4 HW decoding with VDPAU, data[0] contains a vdpau_render_state struct which contains the bitstream of the slices as well as various fields extracted from headers +#endif + AV_PIX_FMT_DXVA2_VLD, ///< HW decoding through DXVA2, Picture.data[3] contains a LPDIRECT3DSURFACE9 pointer + + AV_PIX_FMT_RGB444LE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_RGB444BE, ///< packed RGB 4:4:4, 16bpp, (msb)4X 4R 4G 4B(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_BGR444LE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), little-endian, X=unused/undefined + AV_PIX_FMT_BGR444BE, ///< packed BGR 4:4:4, 16bpp, (msb)4X 4B 4G 4R(lsb), big-endian, X=unused/undefined + AV_PIX_FMT_YA8, ///< 8 bits gray, 8 bits alpha + + AV_PIX_FMT_Y400A = AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + AV_PIX_FMT_GRAY8A= AV_PIX_FMT_YA8, ///< alias for AV_PIX_FMT_YA8 + + AV_PIX_FMT_BGR48BE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as big-endian + AV_PIX_FMT_BGR48LE, ///< packed RGB 16:16:16, 48bpp, 16B, 16G, 16R, the 2-byte value for each R/G/B component is stored as little-endian + + /** + * The following 12 formats have the disadvantage of needing 1 format for each bit 
depth. + * Notice that each 9/10 bits sample is stored in 16 bits with extra padding. + * If you want to support multiple bit depths, then using AV_PIX_FMT_YUV420P16* with the bpp stored separately is better. + */ + AV_PIX_FMT_YUV420P9BE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P9LE, ///< planar YUV 4:2:0, 13.5bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P10BE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P10LE,///< planar YUV 4:2:0, 15bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P10BE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P10LE,///< planar YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P9BE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P9LE, ///< planar YUV 4:4:4, 27bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P10BE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P10LE,///< planar YUV 4:4:4, 30bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV422P9BE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P9LE, ///< planar YUV 4:2:2, 18bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_VDA_VLD, ///< hardware decoding through VDA + AV_PIX_FMT_GBRP, ///< planar GBR 4:4:4 24bpp + AV_PIX_FMT_GBRP9BE, ///< planar GBR 4:4:4 27bpp, big-endian + AV_PIX_FMT_GBRP9LE, ///< planar GBR 4:4:4 27bpp, little-endian + AV_PIX_FMT_GBRP10BE, ///< planar GBR 4:4:4 30bpp, big-endian + AV_PIX_FMT_GBRP10LE, ///< planar GBR 4:4:4 30bpp, little-endian + AV_PIX_FMT_GBRP16BE, ///< planar GBR 4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRP16LE, ///< planar GBR 4:4:4 48bpp, little-endian + AV_PIX_FMT_YUVA422P, 
///< planar YUV 4:2:2 24bpp, (1 Cr & Cb sample per 2x1 Y & A samples) + AV_PIX_FMT_YUVA444P, ///< planar YUV 4:4:4 32bpp, (1 Cr & Cb sample per 1x1 Y & A samples) + AV_PIX_FMT_YUVA420P9BE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), big-endian + AV_PIX_FMT_YUVA420P9LE, ///< planar YUV 4:2:0 22.5bpp, (1 Cr & Cb sample per 2x2 Y & A samples), little-endian + AV_PIX_FMT_YUVA422P9BE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA422P9LE, ///< planar YUV 4:2:2 27bpp, (1 Cr & Cb sample per 2x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA444P9BE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + AV_PIX_FMT_YUVA444P9LE, ///< planar YUV 4:4:4 36bpp, (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_YUVA420P10BE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P10LE, ///< planar YUV 4:2:0 25bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P10BE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P10LE, ///< planar YUV 4:2:2 30bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P10BE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P10LE, ///< planar YUV 4:4:4 40bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA420P16BE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, big-endian) + AV_PIX_FMT_YUVA420P16LE, ///< planar YUV 4:2:0 40bpp, (1 Cr & Cb sample per 2x2 Y & A samples, little-endian) + AV_PIX_FMT_YUVA422P16BE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA422P16LE, ///< planar YUV 4:2:2 48bpp, (1 Cr & Cb sample per 2x1 Y & A samples, little-endian) + AV_PIX_FMT_YUVA444P16BE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb 
sample per 1x1 Y & A samples, big-endian) + AV_PIX_FMT_YUVA444P16LE, ///< planar YUV 4:4:4 64bpp, (1 Cr & Cb sample per 1x1 Y & A samples, little-endian) + + AV_PIX_FMT_VDPAU, ///< HW acceleration through VDPAU, Picture.data[3] contains a VdpVideoSurface + + AV_PIX_FMT_XYZ12LE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as little-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_XYZ12BE, ///< packed XYZ 4:4:4, 36 bpp, (msb) 12X, 12Y, 12Z (lsb), the 2-byte value for each X/Y/Z is stored as big-endian, the 4 lower bits are set to 0 + AV_PIX_FMT_NV16, ///< interleaved chroma YUV 4:2:2, 16bpp, (1 Cr & Cb sample per 2x1 Y samples) + AV_PIX_FMT_NV20LE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_NV20BE, ///< interleaved chroma YUV 4:2:2, 20bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + + AV_PIX_FMT_RGBA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_RGBA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16R, 16G, 16B, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + AV_PIX_FMT_BGRA64BE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as big-endian + AV_PIX_FMT_BGRA64LE, ///< packed RGBA 16:16:16:16, 64bpp, 16B, 16G, 16R, 16A, the 2-byte value for each R/G/B/A component is stored as little-endian + + AV_PIX_FMT_YVYU422, ///< packed YUV 4:2:2, 16bpp, Y0 Cr Y1 Cb + + AV_PIX_FMT_VDA, ///< HW acceleration through VDA, data[3] contains a CVPixelBufferRef + + AV_PIX_FMT_YA16BE, ///< 16 bits gray, 16 bits alpha (big-endian) + AV_PIX_FMT_YA16LE, ///< 16 bits gray, 16 bits alpha (little-endian) + + AV_PIX_FMT_GBRAP, ///< planar GBRA 4:4:4:4 32bpp + AV_PIX_FMT_GBRAP16BE, ///< planar GBRA 4:4:4:4 64bpp, big-endian + AV_PIX_FMT_GBRAP16LE, ///< planar GBRA 4:4:4:4 64bpp, 
little-endian + /** + * HW acceleration through QSV, data[3] contains a pointer to the + * mfxFrameSurface1 structure. + */ + AV_PIX_FMT_QSV, + /** + * HW acceleration though MMAL, data[3] contains a pointer to the + * MMAL_BUFFER_HEADER_T structure. + */ + AV_PIX_FMT_MMAL, + + AV_PIX_FMT_D3D11VA_VLD, ///< HW decoding through Direct3D11, Picture.data[3] contains a ID3D11VideoDecoderOutputView pointer + + /** + * HW acceleration through CUDA. data[i] contain CUdeviceptr pointers + * exactly as for system memory frames. + */ + AV_PIX_FMT_CUDA, + + AV_PIX_FMT_0RGB=0x123+4,///< packed RGB 8:8:8, 32bpp, XRGBXRGB... X=unused/undefined + AV_PIX_FMT_RGB0, ///< packed RGB 8:8:8, 32bpp, RGBXRGBX... X=unused/undefined + AV_PIX_FMT_0BGR, ///< packed BGR 8:8:8, 32bpp, XBGRXBGR... X=unused/undefined + AV_PIX_FMT_BGR0, ///< packed BGR 8:8:8, 32bpp, BGRXBGRX... X=unused/undefined + + AV_PIX_FMT_YUV420P12BE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P12LE, ///< planar YUV 4:2:0,18bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV420P14BE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), big-endian + AV_PIX_FMT_YUV420P14LE, ///< planar YUV 4:2:0,21bpp, (1 Cr & Cb sample per 2x2 Y samples), little-endian + AV_PIX_FMT_YUV422P12BE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P12LE, ///< planar YUV 4:2:2,24bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV422P14BE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), big-endian + AV_PIX_FMT_YUV422P14LE, ///< planar YUV 4:2:2,28bpp, (1 Cr & Cb sample per 2x1 Y samples), little-endian + AV_PIX_FMT_YUV444P12BE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P12LE, ///< planar YUV 4:4:4,36bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_YUV444P14BE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb 
sample per 1x1 Y samples), big-endian + AV_PIX_FMT_YUV444P14LE, ///< planar YUV 4:4:4,42bpp, (1 Cr & Cb sample per 1x1 Y samples), little-endian + AV_PIX_FMT_GBRP12BE, ///< planar GBR 4:4:4 36bpp, big-endian + AV_PIX_FMT_GBRP12LE, ///< planar GBR 4:4:4 36bpp, little-endian + AV_PIX_FMT_GBRP14BE, ///< planar GBR 4:4:4 42bpp, big-endian + AV_PIX_FMT_GBRP14LE, ///< planar GBR 4:4:4 42bpp, little-endian + AV_PIX_FMT_YUVJ411P, ///< planar YUV 4:1:1, 12bpp, (1 Cr & Cb sample per 4x1 Y samples) full scale (JPEG), deprecated in favor of AV_PIX_FMT_YUV411P and setting color_range + + AV_PIX_FMT_BAYER_BGGR8, ///< bayer, BGBG..(odd line), GRGR..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_RGGB8, ///< bayer, RGRG..(odd line), GBGB..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GBRG8, ///< bayer, GBGB..(odd line), RGRG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_GRBG8, ///< bayer, GRGR..(odd line), BGBG..(even line), 8-bit samples */ + AV_PIX_FMT_BAYER_BGGR16LE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_BGGR16BE, ///< bayer, BGBG..(odd line), GRGR..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_RGGB16LE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_RGGB16BE, ///< bayer, RGRG..(odd line), GBGB..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GBRG16LE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GBRG16BE, ///< bayer, GBGB..(odd line), RGRG..(even line), 16-bit samples, big-endian */ + AV_PIX_FMT_BAYER_GRBG16LE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, little-endian */ + AV_PIX_FMT_BAYER_GRBG16BE, ///< bayer, GRGR..(odd line), BGBG..(even line), 16-bit samples, big-endian */ +#if !FF_API_XVMC + AV_PIX_FMT_XVMC,///< XVideo Motion Acceleration via common packet passing +#endif /* !FF_API_XVMC */ + AV_PIX_FMT_YUV440P10LE, ///< planar YUV 4:4:0,20bpp, 
(1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P10BE, ///< planar YUV 4:4:0,20bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_YUV440P12LE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), little-endian + AV_PIX_FMT_YUV440P12BE, ///< planar YUV 4:4:0,24bpp, (1 Cr & Cb sample per 1x2 Y samples), big-endian + AV_PIX_FMT_AYUV64LE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), little-endian + AV_PIX_FMT_AYUV64BE, ///< packed AYUV 4:4:4,64bpp (1 Cr & Cb sample per 1x1 Y & A samples), big-endian + + AV_PIX_FMT_VIDEOTOOLBOX, ///< hardware decoding through Videotoolbox + + AV_PIX_FMT_P010LE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, little-endian + AV_PIX_FMT_P010BE, ///< like NV12, with 10bpp per component, data in the high bits, zeros in the low bits, big-endian + + AV_PIX_FMT_GBRAP12BE, ///< planar GBR 4:4:4:4 48bpp, big-endian + AV_PIX_FMT_GBRAP12LE, ///< planar GBR 4:4:4:4 48bpp, little-endian + + AV_PIX_FMT_GBRAP10BE, ///< planar GBR 4:4:4:4 40bpp, big-endian + AV_PIX_FMT_GBRAP10LE, ///< planar GBR 4:4:4:4 40bpp, little-endian + + AV_PIX_FMT_NB, ///< number of pixel formats, DO NOT USE THIS if you want to link with shared libav* because the number of formats might differ between versions +}; + +#define AV_PIX_FMT_Y400A AV_PIX_FMT_GRAY8A +#define AV_PIX_FMT_GBR24P AV_PIX_FMT_GBRP + +#if AV_HAVE_BIGENDIAN +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##be +#else +# define AV_PIX_FMT_NE(be, le) AV_PIX_FMT_##le +#endif + +#define AV_PIX_FMT_RGB32 AV_PIX_FMT_NE(ARGB, BGRA) +#define AV_PIX_FMT_RGB32_1 AV_PIX_FMT_NE(RGBA, ABGR) +#define AV_PIX_FMT_BGR32 AV_PIX_FMT_NE(ABGR, RGBA) +#define AV_PIX_FMT_BGR32_1 AV_PIX_FMT_NE(BGRA, ARGB) +#define AV_PIX_FMT_0RGB32 AV_PIX_FMT_NE(0RGB, BGR0) +#define AV_PIX_FMT_0BGR32 AV_PIX_FMT_NE(0BGR, RGB0) + +#define AV_PIX_FMT_GRAY16 AV_PIX_FMT_NE(GRAY16BE, GRAY16LE) +#define AV_PIX_FMT_YA16 AV_PIX_FMT_NE(YA16BE, 
YA16LE) +#define AV_PIX_FMT_RGB48 AV_PIX_FMT_NE(RGB48BE, RGB48LE) +#define AV_PIX_FMT_RGB565 AV_PIX_FMT_NE(RGB565BE, RGB565LE) +#define AV_PIX_FMT_RGB555 AV_PIX_FMT_NE(RGB555BE, RGB555LE) +#define AV_PIX_FMT_RGB444 AV_PIX_FMT_NE(RGB444BE, RGB444LE) +#define AV_PIX_FMT_RGBA64 AV_PIX_FMT_NE(RGBA64BE, RGBA64LE) +#define AV_PIX_FMT_BGR48 AV_PIX_FMT_NE(BGR48BE, BGR48LE) +#define AV_PIX_FMT_BGR565 AV_PIX_FMT_NE(BGR565BE, BGR565LE) +#define AV_PIX_FMT_BGR555 AV_PIX_FMT_NE(BGR555BE, BGR555LE) +#define AV_PIX_FMT_BGR444 AV_PIX_FMT_NE(BGR444BE, BGR444LE) +#define AV_PIX_FMT_BGRA64 AV_PIX_FMT_NE(BGRA64BE, BGRA64LE) + +#define AV_PIX_FMT_YUV420P9 AV_PIX_FMT_NE(YUV420P9BE , YUV420P9LE) +#define AV_PIX_FMT_YUV422P9 AV_PIX_FMT_NE(YUV422P9BE , YUV422P9LE) +#define AV_PIX_FMT_YUV444P9 AV_PIX_FMT_NE(YUV444P9BE , YUV444P9LE) +#define AV_PIX_FMT_YUV420P10 AV_PIX_FMT_NE(YUV420P10BE, YUV420P10LE) +#define AV_PIX_FMT_YUV422P10 AV_PIX_FMT_NE(YUV422P10BE, YUV422P10LE) +#define AV_PIX_FMT_YUV440P10 AV_PIX_FMT_NE(YUV440P10BE, YUV440P10LE) +#define AV_PIX_FMT_YUV444P10 AV_PIX_FMT_NE(YUV444P10BE, YUV444P10LE) +#define AV_PIX_FMT_YUV420P12 AV_PIX_FMT_NE(YUV420P12BE, YUV420P12LE) +#define AV_PIX_FMT_YUV422P12 AV_PIX_FMT_NE(YUV422P12BE, YUV422P12LE) +#define AV_PIX_FMT_YUV440P12 AV_PIX_FMT_NE(YUV440P12BE, YUV440P12LE) +#define AV_PIX_FMT_YUV444P12 AV_PIX_FMT_NE(YUV444P12BE, YUV444P12LE) +#define AV_PIX_FMT_YUV420P14 AV_PIX_FMT_NE(YUV420P14BE, YUV420P14LE) +#define AV_PIX_FMT_YUV422P14 AV_PIX_FMT_NE(YUV422P14BE, YUV422P14LE) +#define AV_PIX_FMT_YUV444P14 AV_PIX_FMT_NE(YUV444P14BE, YUV444P14LE) +#define AV_PIX_FMT_YUV420P16 AV_PIX_FMT_NE(YUV420P16BE, YUV420P16LE) +#define AV_PIX_FMT_YUV422P16 AV_PIX_FMT_NE(YUV422P16BE, YUV422P16LE) +#define AV_PIX_FMT_YUV444P16 AV_PIX_FMT_NE(YUV444P16BE, YUV444P16LE) + +#define AV_PIX_FMT_GBRP9 AV_PIX_FMT_NE(GBRP9BE , GBRP9LE) +#define AV_PIX_FMT_GBRP10 AV_PIX_FMT_NE(GBRP10BE, GBRP10LE) +#define AV_PIX_FMT_GBRP12 AV_PIX_FMT_NE(GBRP12BE, GBRP12LE) +#define 
AV_PIX_FMT_GBRP14 AV_PIX_FMT_NE(GBRP14BE, GBRP14LE) +#define AV_PIX_FMT_GBRP16 AV_PIX_FMT_NE(GBRP16BE, GBRP16LE) +#define AV_PIX_FMT_GBRAP10 AV_PIX_FMT_NE(GBRAP10BE, GBRAP10LE) +#define AV_PIX_FMT_GBRAP12 AV_PIX_FMT_NE(GBRAP12BE, GBRAP12LE) +#define AV_PIX_FMT_GBRAP16 AV_PIX_FMT_NE(GBRAP16BE, GBRAP16LE) + +#define AV_PIX_FMT_BAYER_BGGR16 AV_PIX_FMT_NE(BAYER_BGGR16BE, BAYER_BGGR16LE) +#define AV_PIX_FMT_BAYER_RGGB16 AV_PIX_FMT_NE(BAYER_RGGB16BE, BAYER_RGGB16LE) +#define AV_PIX_FMT_BAYER_GBRG16 AV_PIX_FMT_NE(BAYER_GBRG16BE, BAYER_GBRG16LE) +#define AV_PIX_FMT_BAYER_GRBG16 AV_PIX_FMT_NE(BAYER_GRBG16BE, BAYER_GRBG16LE) + + +#define AV_PIX_FMT_YUVA420P9 AV_PIX_FMT_NE(YUVA420P9BE , YUVA420P9LE) +#define AV_PIX_FMT_YUVA422P9 AV_PIX_FMT_NE(YUVA422P9BE , YUVA422P9LE) +#define AV_PIX_FMT_YUVA444P9 AV_PIX_FMT_NE(YUVA444P9BE , YUVA444P9LE) +#define AV_PIX_FMT_YUVA420P10 AV_PIX_FMT_NE(YUVA420P10BE, YUVA420P10LE) +#define AV_PIX_FMT_YUVA422P10 AV_PIX_FMT_NE(YUVA422P10BE, YUVA422P10LE) +#define AV_PIX_FMT_YUVA444P10 AV_PIX_FMT_NE(YUVA444P10BE, YUVA444P10LE) +#define AV_PIX_FMT_YUVA420P16 AV_PIX_FMT_NE(YUVA420P16BE, YUVA420P16LE) +#define AV_PIX_FMT_YUVA422P16 AV_PIX_FMT_NE(YUVA422P16BE, YUVA422P16LE) +#define AV_PIX_FMT_YUVA444P16 AV_PIX_FMT_NE(YUVA444P16BE, YUVA444P16LE) + +#define AV_PIX_FMT_XYZ12 AV_PIX_FMT_NE(XYZ12BE, XYZ12LE) +#define AV_PIX_FMT_NV20 AV_PIX_FMT_NE(NV20BE, NV20LE) +#define AV_PIX_FMT_AYUV64 AV_PIX_FMT_NE(AYUV64BE, AYUV64LE) +#define AV_PIX_FMT_P010 AV_PIX_FMT_NE(P010BE, P010LE) + +/** + * Chromaticity coordinates of the source primaries. 
+ */ +enum AVColorPrimaries { + AVCOL_PRI_RESERVED0 = 0, + AVCOL_PRI_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 / SMPTE RP177 Annex B + AVCOL_PRI_UNSPECIFIED = 2, + AVCOL_PRI_RESERVED = 3, + AVCOL_PRI_BT470M = 4, ///< also FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + + AVCOL_PRI_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM + AVCOL_PRI_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_PRI_SMPTE240M = 7, ///< functionally identical to above + AVCOL_PRI_FILM = 8, ///< colour filters using Illuminant C + AVCOL_PRI_BT2020 = 9, ///< ITU-R BT2020 + AVCOL_PRI_SMPTEST428_1= 10, ///< SMPTE ST 428-1 (CIE 1931 XYZ) + AVCOL_PRI_NB, ///< Not part of ABI +}; + +/** + * Color Transfer Characteristic. + */ +enum AVColorTransferCharacteristic { + AVCOL_TRC_RESERVED0 = 0, + AVCOL_TRC_BT709 = 1, ///< also ITU-R BT1361 + AVCOL_TRC_UNSPECIFIED = 2, + AVCOL_TRC_RESERVED = 3, + AVCOL_TRC_GAMMA22 = 4, ///< also ITU-R BT470M / ITU-R BT1700 625 PAL & SECAM + AVCOL_TRC_GAMMA28 = 5, ///< also ITU-R BT470BG + AVCOL_TRC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 or 625 / ITU-R BT1358 525 or 625 / ITU-R BT1700 NTSC + AVCOL_TRC_SMPTE240M = 7, + AVCOL_TRC_LINEAR = 8, ///< "Linear transfer characteristics" + AVCOL_TRC_LOG = 9, ///< "Logarithmic transfer characteristic (100:1 range)" + AVCOL_TRC_LOG_SQRT = 10, ///< "Logarithmic transfer characteristic (100 * Sqrt(10) : 1 range)" + AVCOL_TRC_IEC61966_2_4 = 11, ///< IEC 61966-2-4 + AVCOL_TRC_BT1361_ECG = 12, ///< ITU-R BT1361 Extended Colour Gamut + AVCOL_TRC_IEC61966_2_1 = 13, ///< IEC 61966-2-1 (sRGB or sYCC) + AVCOL_TRC_BT2020_10 = 14, ///< ITU-R BT2020 for 10-bit system + AVCOL_TRC_BT2020_12 = 15, ///< ITU-R BT2020 for 12-bit system + AVCOL_TRC_SMPTEST2084 = 16, ///< SMPTE ST 2084 for 10-, 12-, 14- and 16-bit systems + AVCOL_TRC_SMPTEST428_1 = 17, ///< SMPTE ST 428-1 + AVCOL_TRC_ARIB_STD_B67 = 18, ///< ARIB STD-B67, known as "Hybrid log-gamma" 
+ AVCOL_TRC_NB, ///< Not part of ABI +}; + +/** + * YUV colorspace type. + */ +enum AVColorSpace { + AVCOL_SPC_RGB = 0, ///< order of coefficients is actually GBR, also IEC 61966-2-1 (sRGB) + AVCOL_SPC_BT709 = 1, ///< also ITU-R BT1361 / IEC 61966-2-4 xvYCC709 / SMPTE RP177 Annex B + AVCOL_SPC_UNSPECIFIED = 2, + AVCOL_SPC_RESERVED = 3, + AVCOL_SPC_FCC = 4, ///< FCC Title 47 Code of Federal Regulations 73.682 (a)(20) + AVCOL_SPC_BT470BG = 5, ///< also ITU-R BT601-6 625 / ITU-R BT1358 625 / ITU-R BT1700 625 PAL & SECAM / IEC 61966-2-4 xvYCC601 + AVCOL_SPC_SMPTE170M = 6, ///< also ITU-R BT601-6 525 / ITU-R BT1358 525 / ITU-R BT1700 NTSC + AVCOL_SPC_SMPTE240M = 7, ///< functionally identical to above + AVCOL_SPC_YCOCG = 8, ///< Used by Dirac / VC-2 and H.264 FRext, see ITU-T SG16 + AVCOL_SPC_BT2020_NCL = 9, ///< ITU-R BT2020 non-constant luminance system + AVCOL_SPC_BT2020_CL = 10, ///< ITU-R BT2020 constant luminance system + AVCOL_SPC_NB, ///< Not part of ABI +}; +#define AVCOL_SPC_YCGCO AVCOL_SPC_YCOCG + + +/** + * MPEG vs JPEG YUV range. + */ +enum AVColorRange { + AVCOL_RANGE_UNSPECIFIED = 0, + AVCOL_RANGE_MPEG = 1, ///< the normal 219*2^(n-8) "MPEG" YUV ranges + AVCOL_RANGE_JPEG = 2, ///< the normal 2^n-1 "JPEG" YUV ranges + AVCOL_RANGE_NB, ///< Not part of ABI +}; + +/** + * Location of chroma samples. + * + * Illustration showing the location of the first (top left) chroma sample of the + * image, the left shows only luma, the right + * shows the location of the chroma sample, the 2 could be imagined to overlay + * each other but are drawn separately due to limitations of ASCII + * + * 1st 2nd 1st 2nd horizontal luma sample positions + * v v v v + * ______ ______ + *1st luma line > |X X ... |3 4 X ... X are luma samples, + * | |1 2 1-6 are possible chroma positions + *2nd luma line > |X X ... |5 6 X ... 
0 is undefined/unknown position + */ +enum AVChromaLocation { + AVCHROMA_LOC_UNSPECIFIED = 0, + AVCHROMA_LOC_LEFT = 1, ///< MPEG-2/4 4:2:0, H.264 default for 4:2:0 + AVCHROMA_LOC_CENTER = 2, ///< MPEG-1 4:2:0, JPEG 4:2:0, H.263 4:2:0 + AVCHROMA_LOC_TOPLEFT = 3, ///< ITU-R 601, SMPTE 274M 296M S314M(DV 4:1:1), mpeg2 4:2:2 + AVCHROMA_LOC_TOP = 4, + AVCHROMA_LOC_BOTTOMLEFT = 5, + AVCHROMA_LOC_BOTTOM = 6, + AVCHROMA_LOC_NB, ///< Not part of ABI +}; + +#endif /* AVUTIL_PIXFMT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/random_seed.h b/third-party/FFmpeg-iOS/include/libavutil/random_seed.h new file mode 100644 index 0000000000..0462a048e0 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/random_seed.h @@ -0,0 +1,43 @@ +/* + * Copyright (c) 2009 Baptiste Coudurier + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RANDOM_SEED_H +#define AVUTIL_RANDOM_SEED_H + +#include +/** + * @addtogroup lavu_crypto + * @{ + */ + +/** + * Get a seed to use in conjunction with random functions. + * This function tries to provide a good seed at a best effort bases. + * Its possible to call this function multiple times if more bits are needed. 
+ * It can be quite slow, which is why it should only be used as seed for a faster + * PRNG. The quality of the seed depends on the platform. + */ +uint32_t av_get_random_seed(void); + +/** + * @} + */ + +#endif /* AVUTIL_RANDOM_SEED_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/rational.h b/third-party/FFmpeg-iOS/include/libavutil/rational.h new file mode 100644 index 0000000000..2897469680 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/rational.h @@ -0,0 +1,173 @@ +/* + * rational numbers + * Copyright (c) 2003 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * rational numbers + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_RATIONAL_H +#define AVUTIL_RATIONAL_H + +#include +#include +#include "attributes.h" + +/** + * @addtogroup lavu_math + * @{ + */ + +/** + * rational number numerator/denominator + */ +typedef struct AVRational{ + int num; ///< numerator + int den; ///< denominator +} AVRational; + +/** + * Create a rational. + * Useful for compilers that do not support compound literals. + * @note The return value is not reduced. + */ +static inline AVRational av_make_q(int num, int den) +{ + AVRational r = { num, den }; + return r; +} + +/** + * Compare two rationals. 
+ * @param a first rational + * @param b second rational + * @return 0 if a==b, 1 if a>b, -1 if a>63)|1; + else if(b.den && a.den) return 0; + else if(a.num && b.num) return (a.num>>31) - (b.num>>31); + else return INT_MIN; +} + +/** + * Convert rational to double. + * @param a rational to convert + * @return (double) a + */ +static inline double av_q2d(AVRational a){ + return a.num / (double) a.den; +} + +/** + * Reduce a fraction. + * This is useful for framerate calculations. + * @param dst_num destination numerator + * @param dst_den destination denominator + * @param num source numerator + * @param den source denominator + * @param max the maximum allowed for dst_num & dst_den + * @return 1 if exact, 0 otherwise + */ +int av_reduce(int *dst_num, int *dst_den, int64_t num, int64_t den, int64_t max); + +/** + * Multiply two rationals. + * @param b first rational + * @param c second rational + * @return b*c + */ +AVRational av_mul_q(AVRational b, AVRational c) av_const; + +/** + * Divide one rational by another. + * @param b first rational + * @param c second rational + * @return b/c + */ +AVRational av_div_q(AVRational b, AVRational c) av_const; + +/** + * Add two rationals. + * @param b first rational + * @param c second rational + * @return b+c + */ +AVRational av_add_q(AVRational b, AVRational c) av_const; + +/** + * Subtract one rational from another. + * @param b first rational + * @param c second rational + * @return b-c + */ +AVRational av_sub_q(AVRational b, AVRational c) av_const; + +/** + * Invert a rational. + * @param q value + * @return 1 / q + */ +static av_always_inline AVRational av_inv_q(AVRational q) +{ + AVRational r = { q.den, q.num }; + return r; +} + +/** + * Convert a double precision floating point number to a rational. + * inf is expressed as {1,0} or {-1,0} depending on the sign. 
+ * + * @param d double to convert + * @param max the maximum allowed numerator and denominator + * @return (AVRational) d + */ +AVRational av_d2q(double d, int max) av_const; + +/** + * @return 1 if q1 is nearer to q than q2, -1 if q2 is nearer + * than q1, 0 if they have the same distance. + */ +int av_nearer_q(AVRational q, AVRational q1, AVRational q2); + +/** + * Find the nearest value in q_list to q. + * @param q_list an array of rationals terminated by {0, 0} + * @return the index of the nearest value found in the array + */ +int av_find_nearest_q_idx(AVRational q, const AVRational* q_list); + +/** + * Converts a AVRational to a IEEE 32bit float. + * + * The float is returned in a uint32_t and its value is platform indepenant. + */ +uint32_t av_q2intfloat(AVRational q); + +/** + * @} + */ + +#endif /* AVUTIL_RATIONAL_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/rc4.h b/third-party/FFmpeg-iOS/include/libavutil/rc4.h new file mode 100644 index 0000000000..029cd2ad58 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/rc4.h @@ -0,0 +1,66 @@ +/* + * RC4 encryption/decryption/pseudo-random number generator + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RC4_H +#define AVUTIL_RC4_H + +#include + +/** + * @defgroup lavu_rc4 RC4 + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVRC4 { + uint8_t state[256]; + int x, y; +} AVRC4; + +/** + * Allocate an AVRC4 context. + */ +AVRC4 *av_rc4_alloc(void); + +/** + * @brief Initializes an AVRC4 context. + * + * @param key_bits must be a multiple of 8 + * @param decrypt 0 for encryption, 1 for decryption, currently has no effect + * @return zero on success, negative value otherwise + */ +int av_rc4_init(struct AVRC4 *d, const uint8_t *key, int key_bits, int decrypt); + +/** + * @brief Encrypts / decrypts using the RC4 algorithm. + * + * @param count number of bytes + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst, may be NULL + * @param iv not (yet) used for RC4, should be NULL + * @param decrypt 0 for encryption, 1 for decryption, not (yet) used + */ +void av_rc4_crypt(struct AVRC4 *d, uint8_t *dst, const uint8_t *src, int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_RC4_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/replaygain.h b/third-party/FFmpeg-iOS/include/libavutil/replaygain.h new file mode 100644 index 0000000000..b49bf1a3d9 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/replaygain.h @@ -0,0 +1,50 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_REPLAYGAIN_H +#define AVUTIL_REPLAYGAIN_H + +#include + +/** + * ReplayGain information (see + * http://wiki.hydrogenaudio.org/index.php?title=ReplayGain_1.0_specification). + * The size of this struct is a part of the public ABI. + */ +typedef struct AVReplayGain { + /** + * Track replay gain in microbels (divide by 100000 to get the value in dB). + * Should be set to INT32_MIN when unknown. + */ + int32_t track_gain; + /** + * Peak track amplitude, with 100000 representing full scale (but values + * may overflow). 0 when unknown. + */ + uint32_t track_peak; + /** + * Same as track_gain, but for the whole album. + */ + int32_t album_gain; + /** + * Same as track_peak, but for the whole album, + */ + uint32_t album_peak; +} AVReplayGain; + +#endif /* AVUTIL_REPLAYGAIN_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/ripemd.h b/third-party/FFmpeg-iOS/include/libavutil/ripemd.h new file mode 100644 index 0000000000..7b0c8bc89c --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/ripemd.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_RIPEMD_H +#define AVUTIL_RIPEMD_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_ripemd RIPEMD + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_ripemd_size; + +struct AVRIPEMD; + +/** + * Allocate an AVRIPEMD context. + */ +struct AVRIPEMD *av_ripemd_alloc(void); + +/** + * Initialize RIPEMD hashing. + * + * @param context pointer to the function context (of size av_ripemd_size) + * @param bits number of bits in digest (128, 160, 256 or 320 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_ripemd_init(struct AVRIPEMD* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_ripemd_update(struct AVRIPEMD* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_ripemd_final(struct AVRIPEMD* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_RIPEMD_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h b/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h new file mode 100644 index 0000000000..57da2784d2 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/samplefmt.h @@ -0,0 +1,270 @@ +/* + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SAMPLEFMT_H +#define AVUTIL_SAMPLEFMT_H + +#include + +#include "avutil.h" +#include "attributes.h" + +/** + * @addtogroup lavu_audio + * @{ + * + * @defgroup lavu_sampfmts Audio sample formats + * + * Audio sample format enumeration and related convenience functions. + * @{ + */ + +/** + * Audio sample formats + * + * - The data described by the sample format is always in native-endian order. + * Sample values can be expressed by native C types, hence the lack of a signed + * 24-bit sample format even though it is a common raw audio data format. + * + * - The floating-point formats are based on full volume being in the range + * [-1.0, 1.0]. Any values outside this range are beyond full volume level. + * + * - The data layout as used in av_samples_fill_arrays() and elsewhere in FFmpeg + * (such as AVFrame in libavcodec) is as follows: + * + * @par + * For planar sample formats, each audio channel is in a separate data plane, + * and linesize is the buffer size, in bytes, for a single plane. All data + * planes must be the same size. For packed sample formats, only the first data + * plane is used, and samples for each channel are interleaved. In this case, + * linesize is the buffer size, in bytes, for the 1 plane. 
+ * + */ +enum AVSampleFormat { + AV_SAMPLE_FMT_NONE = -1, + AV_SAMPLE_FMT_U8, ///< unsigned 8 bits + AV_SAMPLE_FMT_S16, ///< signed 16 bits + AV_SAMPLE_FMT_S32, ///< signed 32 bits + AV_SAMPLE_FMT_FLT, ///< float + AV_SAMPLE_FMT_DBL, ///< double + + AV_SAMPLE_FMT_U8P, ///< unsigned 8 bits, planar + AV_SAMPLE_FMT_S16P, ///< signed 16 bits, planar + AV_SAMPLE_FMT_S32P, ///< signed 32 bits, planar + AV_SAMPLE_FMT_FLTP, ///< float, planar + AV_SAMPLE_FMT_DBLP, ///< double, planar + + AV_SAMPLE_FMT_NB ///< Number of sample formats. DO NOT USE if linking dynamically +}; + +/** + * Return the name of sample_fmt, or NULL if sample_fmt is not + * recognized. + */ +const char *av_get_sample_fmt_name(enum AVSampleFormat sample_fmt); + +/** + * Return a sample format corresponding to name, or AV_SAMPLE_FMT_NONE + * on error. + */ +enum AVSampleFormat av_get_sample_fmt(const char *name); + +/** + * Return the planar<->packed alternative form of the given sample format, or + * AV_SAMPLE_FMT_NONE on error. If the passed sample_fmt is already in the + * requested planar/packed format, the format returned is the same as the + * input. + */ +enum AVSampleFormat av_get_alt_sample_fmt(enum AVSampleFormat sample_fmt, int planar); + +/** + * Get the packed alternative form of the given sample format. + * + * If the passed sample_fmt is already in packed format, the format returned is + * the same as the input. + * + * @return the packed alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. + */ +enum AVSampleFormat av_get_packed_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Get the planar alternative form of the given sample format. + * + * If the passed sample_fmt is already in planar format, the format returned is + * the same as the input. + * + * @return the planar alternative form of the given sample format or + AV_SAMPLE_FMT_NONE on error. 
+ */ +enum AVSampleFormat av_get_planar_sample_fmt(enum AVSampleFormat sample_fmt); + +/** + * Generate a string corresponding to the sample format with + * sample_fmt, or a header if sample_fmt is negative. + * + * @param buf the buffer where to write the string + * @param buf_size the size of buf + * @param sample_fmt the number of the sample format to print the + * corresponding info string, or a negative value to print the + * corresponding header. + * @return the pointer to the filled buffer or NULL if sample_fmt is + * unknown or in case of other errors + */ +char *av_get_sample_fmt_string(char *buf, int buf_size, enum AVSampleFormat sample_fmt); + +/** + * Return number of bytes per sample. + * + * @param sample_fmt the sample format + * @return number of bytes per sample or zero if unknown for the given + * sample format + */ +int av_get_bytes_per_sample(enum AVSampleFormat sample_fmt); + +/** + * Check if the sample format is planar. + * + * @param sample_fmt the sample format to inspect + * @return 1 if the sample format is planar, 0 if it is interleaved + */ +int av_sample_fmt_is_planar(enum AVSampleFormat sample_fmt); + +/** + * Get the required buffer size for the given audio parameters. + * + * @param[out] linesize calculated linesize, may be NULL + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return required buffer size, or negative error code on failure + */ +int av_samples_get_buffer_size(int *linesize, int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * @} + * + * @defgroup lavu_sampmanip Samples manipulation + * + * Functions that manipulate audio samples + * @{ + */ + +/** + * Fill plane data pointers and linesize for samples with sample + * format sample_fmt. 
+ * + * The audio_data array is filled with the pointers to the samples data planes: + * for planar, set the start point of each channel's data within the buffer, + * for packed, set the start point of the entire buffer only. + * + * The value pointed to by linesize is set to the aligned size of each + * channel's data buffer for planar layout, or to the aligned size of the + * buffer for all channels for packed layout. + * + * The buffer in buf must be big enough to contain all the samples + * (use av_samples_get_buffer_size() to compute its minimum size), + * otherwise the audio_data pointers will point to invalid data. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. + * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize calculated linesize, may be NULL + * @param buf the pointer to a buffer containing the samples + * @param nb_channels the number of channels + * @param nb_samples the number of samples in a single channel + * @param sample_fmt the sample format + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return minimum size in bytes required for the buffer in case + * of success at the next bump + */ +int av_samples_fill_arrays(uint8_t **audio_data, int *linesize, + const uint8_t *buf, + int nb_channels, int nb_samples, + enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a samples buffer for nb_samples samples, and fill data pointers and + * linesize accordingly. + * The allocated samples buffer can be freed by using av_freep(&audio_data[0]) + * Allocated data will be initialized to silence. + * + * @see enum AVSampleFormat + * The documentation for AVSampleFormat describes the data layout. 
+ * + * @param[out] audio_data array to be filled with the pointer for each channel + * @param[out] linesize aligned size for audio buffer(s), may be NULL + * @param nb_channels number of audio channels + * @param nb_samples number of samples per channel + * @param align buffer size alignment (0 = default, 1 = no alignment) + * @return >=0 on success or a negative error code on failure + * @todo return the size of the allocated buffer in case of success at the next bump + * @see av_samples_fill_arrays() + * @see av_samples_alloc_array_and_samples() + */ +int av_samples_alloc(uint8_t **audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Allocate a data pointers array, samples buffer for nb_samples + * samples, and fill data pointers and linesize accordingly. + * + * This is the same as av_samples_alloc(), but also allocates the data + * pointers array. + * + * @see av_samples_alloc() + */ +int av_samples_alloc_array_and_samples(uint8_t ***audio_data, int *linesize, int nb_channels, + int nb_samples, enum AVSampleFormat sample_fmt, int align); + +/** + * Copy samples from src to dst. + * + * @param dst destination array of pointers to data planes + * @param src source array of pointers to data planes + * @param dst_offset offset in samples at which the data will be written to dst + * @param src_offset offset in samples at which the data will be read from src + * @param nb_samples number of samples to be copied + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_copy(uint8_t **dst, uint8_t * const *src, int dst_offset, + int src_offset, int nb_samples, int nb_channels, + enum AVSampleFormat sample_fmt); + +/** + * Fill an audio buffer with silence. 
+ * + * @param audio_data array of pointers to data planes + * @param offset offset in samples at which to start filling + * @param nb_samples number of samples to fill + * @param nb_channels number of audio channels + * @param sample_fmt audio sample format + */ +int av_samples_set_silence(uint8_t **audio_data, int offset, int nb_samples, + int nb_channels, enum AVSampleFormat sample_fmt); + +/** + * @} + * @} + */ +#endif /* AVUTIL_SAMPLEFMT_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/sha.h b/third-party/FFmpeg-iOS/include/libavutil/sha.h new file mode 100644 index 0000000000..bf4377e51b --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/sha.h @@ -0,0 +1,74 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA_H +#define AVUTIL_SHA_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha SHA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha_size; + +struct AVSHA; + +/** + * Allocate an AVSHA context. + */ +struct AVSHA *av_sha_alloc(void); + +/** + * Initialize SHA-1 or SHA-2 hashing. 
+ * + * @param context pointer to the function context (of size av_sha_size) + * @param bits number of bits in digest (SHA-1 - 160 bits, SHA-2 224 or 256 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha_init(struct AVSHA* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha_update(struct AVSHA* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha_final(struct AVSHA* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/sha512.h b/third-party/FFmpeg-iOS/include/libavutil/sha512.h new file mode 100644 index 0000000000..7b08701477 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/sha512.h @@ -0,0 +1,75 @@ +/* + * Copyright (C) 2007 Michael Niedermayer + * Copyright (C) 2013 James Almer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_SHA512_H +#define AVUTIL_SHA512_H + +#include + +#include "attributes.h" +#include "version.h" + +/** + * @defgroup lavu_sha512 SHA512 + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_sha512_size; + +struct AVSHA512; + +/** + * Allocate an AVSHA512 context. + */ +struct AVSHA512 *av_sha512_alloc(void); + +/** + * Initialize SHA-2 512 hashing. + * + * @param context pointer to the function context (of size av_sha512_size) + * @param bits number of bits in digest (224, 256, 384 or 512 bits) + * @return zero if initialization succeeded, -1 otherwise + */ +int av_sha512_init(struct AVSHA512* context, int bits); + +/** + * Update hash value. + * + * @param context hash function context + * @param data input data to update hash with + * @param len input data length + */ +void av_sha512_update(struct AVSHA512* context, const uint8_t* data, unsigned int len); + +/** + * Finish hashing and output digest value. + * + * @param context hash function context + * @param digest buffer where output digest value is stored + */ +void av_sha512_final(struct AVSHA512* context, uint8_t *digest); + +/** + * @} + */ + +#endif /* AVUTIL_SHA512_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h b/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h new file mode 100644 index 0000000000..19c541643e --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/stereo3d.h @@ -0,0 +1,170 @@ +/* + * Copyright (c) 2013 Vittorio Giovara + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_STEREO3D_H +#define AVUTIL_STEREO3D_H + +#include + +#include "frame.h" + +/** + * List of possible 3D Types + */ +enum AVStereo3DType { + /** + * Video is not stereoscopic (and metadata has to be there). + */ + AV_STEREO3D_2D, + + /** + * Views are next to each other. + * + * LLLLRRRR + * LLLLRRRR + * LLLLRRRR + * ... + */ + AV_STEREO3D_SIDEBYSIDE, + + /** + * Views are on top of each other. + * + * LLLLLLLL + * LLLLLLLL + * RRRRRRRR + * RRRRRRRR + */ + AV_STEREO3D_TOPBOTTOM, + + /** + * Views are alternated temporally. + * + * frame0 frame1 frame2 ... + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * LLLLLLLL RRRRRRRR LLLLLLLL + * ... ... ... + */ + AV_STEREO3D_FRAMESEQUENCE, + + /** + * Views are packed in a checkerboard-like structure per pixel. + * + * LRLRLRLR + * RLRLRLRL + * LRLRLRLR + * ... + */ + AV_STEREO3D_CHECKERBOARD, + + /** + * Views are next to each other, but when upscaling + * apply a checkerboard pattern. + * + * LLLLRRRR L L L L R R R R + * LLLLRRRR => L L L L R R R R + * LLLLRRRR L L L L R R R R + * LLLLRRRR L L L L R R R R + */ + AV_STEREO3D_SIDEBYSIDE_QUINCUNX, + + /** + * Views are packed per line, as if interlaced. + * + * LLLLLLLL + * RRRRRRRR + * LLLLLLLL + * ... + */ + AV_STEREO3D_LINES, + + /** + * Views are packed per column. + * + * LRLRLRLR + * LRLRLRLR + * LRLRLRLR + * ... + */ + AV_STEREO3D_COLUMNS, +}; + + +/** + * Inverted views, Right/Bottom represents the left view. 
+ */ +#define AV_STEREO3D_FLAG_INVERT (1 << 0) + +/** + * Stereo 3D type: this structure describes how two videos are packed + * within a single video surface, with additional information as needed. + * + * @note The struct must be allocated with av_stereo3d_alloc() and + * its size is not a part of the public ABI. + */ +typedef struct AVStereo3D { + /** + * How views are packed within the video. + */ + enum AVStereo3DType type; + + /** + * Additional information about the frame packing. + */ + int flags; +} AVStereo3D; + +/** + * Allocate an AVStereo3D structure and set its fields to default values. + * The resulting struct can be freed using av_freep(). + * + * @return An AVStereo3D filled with default values or NULL on failure. + */ +AVStereo3D *av_stereo3d_alloc(void); + +/** + * Allocate a complete AVFrameSideData and add it to the frame. + * + * @param frame The frame which side data is added to. + * + * @return The AVStereo3D structure to be filled by caller. + */ +AVStereo3D *av_stereo3d_create_side_data(AVFrame *frame); + +/** + * Provide a human-readable name of a given stereo3d type. + * + * @param type The input stereo3d type value. + * + * @return The name of the stereo3d value, or "unknown". + */ +const char *av_stereo3d_type_name(unsigned int type); + +/** + * Get the AVStereo3DType form a human-readable name. + * + * @param type The input string. + * + * @return The AVStereo3DType value, or -1 if not found. + */ +int av_stereo3d_from_name(const char *name); + +#endif /* AVUTIL_STEREO3D_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/tea.h b/third-party/FFmpeg-iOS/include/libavutil/tea.h new file mode 100644 index 0000000000..dd929bdafd --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/tea.h @@ -0,0 +1,71 @@ +/* + * A 32-bit implementation of the TEA algorithm + * Copyright (c) 2015 Vesselin Bontchev + * + * This file is part of FFmpeg. 
+ * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TEA_H +#define AVUTIL_TEA_H + +#include + +/** + * @file + * @brief Public header for libavutil TEA algorithm + * @defgroup lavu_tea TEA + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_tea_size; + +struct AVTEA; + +/** + * Allocate an AVTEA context + * To free the struct: av_free(ptr) + */ +struct AVTEA *av_tea_alloc(void); + +/** + * Initialize an AVTEA context. + * + * @param ctx an AVTEA context + * @param key a key of 16 bytes used for encryption/decryption + * @param rounds the number of rounds in TEA (64 is the "standard") + */ +void av_tea_init(struct AVTEA *ctx, const uint8_t key[16], int rounds); + +/** + * Encrypt or decrypt a buffer using a previously initialized context. 
+ * + * @param ctx an AVTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_tea_crypt(struct AVTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_TEA_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/threadmessage.h b/third-party/FFmpeg-iOS/include/libavutil/threadmessage.h new file mode 100644 index 0000000000..8480a0a3db --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/threadmessage.h @@ -0,0 +1,107 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public License + * as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public License + * along with FFmpeg; if not, write to the Free Software Foundation, Inc., + * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_THREADMESSAGE_H +#define AVUTIL_THREADMESSAGE_H + +typedef struct AVThreadMessageQueue AVThreadMessageQueue; + +typedef enum AVThreadMessageFlags { + + /** + * Perform non-blocking operation. + * If this flag is set, send and recv operations are non-blocking and + * return AVERROR(EAGAIN) immediately if they can not proceed. + */ + AV_THREAD_MESSAGE_NONBLOCK = 1, + +} AVThreadMessageFlags; + +/** + * Allocate a new message queue. 
+ * + * @param mq pointer to the message queue + * @param nelem maximum number of elements in the queue + * @param elsize size of each element in the queue + * @return >=0 for success; <0 for error, in particular AVERROR(ENOSYS) if + * lavu was built without thread support + */ +int av_thread_message_queue_alloc(AVThreadMessageQueue **mq, + unsigned nelem, + unsigned elsize); + +/** + * Free a message queue. + * + * The message queue must no longer be in use by another thread. + */ +void av_thread_message_queue_free(AVThreadMessageQueue **mq); + +/** + * Send a message on the queue. + */ +int av_thread_message_queue_send(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Receive a message from the queue. + */ +int av_thread_message_queue_recv(AVThreadMessageQueue *mq, + void *msg, + unsigned flags); + +/** + * Set the sending error code. + * + * If the error code is set to non-zero, av_thread_message_queue_send() will + * return it immediately. Conventional values, such as AVERROR_EOF or + * AVERROR(EAGAIN), can be used to cause the sending thread to stop or + * suspend its operation. + */ +void av_thread_message_queue_set_err_send(AVThreadMessageQueue *mq, + int err); + +/** + * Set the receiving error code. + * + * If the error code is set to non-zero, av_thread_message_queue_recv() will + * return it immediately when there are no longer available messages. + * Conventional values, such as AVERROR_EOF or AVERROR(EAGAIN), can be used + * to cause the receiving thread to stop or suspend its operation. + */ +void av_thread_message_queue_set_err_recv(AVThreadMessageQueue *mq, + int err); + +/** + * Set the optional free message callback function which will be called if an + * operation is removing messages from the queue. 
+ */ +void av_thread_message_queue_set_free_func(AVThreadMessageQueue *mq, + void (*free_func)(void *msg)); + +/** + * Flush the message queue + * + * This function is mostly equivalent to reading and free-ing every message + * except that it will be done in a single operation (no lock/unlock between + * reads). + */ +void av_thread_message_flush(AVThreadMessageQueue *mq); + +#endif /* AVUTIL_THREADMESSAGE_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/time.h b/third-party/FFmpeg-iOS/include/libavutil/time.h new file mode 100644 index 0000000000..dc169b064a --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/time.h @@ -0,0 +1,56 @@ +/* + * Copyright (c) 2000-2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TIME_H +#define AVUTIL_TIME_H + +#include + +/** + * Get the current time in microseconds. + */ +int64_t av_gettime(void); + +/** + * Get the current time in microseconds since some unspecified starting point. + * On platforms that support it, the time comes from a monotonic clock + * This property makes this time source ideal for measuring relative time. + * The returned values may not be monotonic on platforms where a monotonic + * clock is not available. 
+ */ +int64_t av_gettime_relative(void); + +/** + * Indicates with a boolean result if the av_gettime_relative() time source + * is monotonic. + */ +int av_gettime_relative_is_monotonic(void); + +/** + * Sleep for a period of time. Although the duration is expressed in + * microseconds, the actual delay may be rounded to the precision of the + * system timer. + * + * @param usec Number of microseconds to sleep. + * @return zero on success or (negative) error code. + */ +int av_usleep(unsigned usec); + +#endif /* AVUTIL_TIME_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/timecode.h b/third-party/FFmpeg-iOS/include/libavutil/timecode.h new file mode 100644 index 0000000000..56e3975fd8 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/timecode.h @@ -0,0 +1,140 @@ +/* + * Copyright (c) 2006 Smartjog S.A.S, Baptiste Coudurier + * Copyright (c) 2011-2012 Smartjog S.A.S, Clément Bœsch + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * Timecode helpers header + */ + +#ifndef AVUTIL_TIMECODE_H +#define AVUTIL_TIMECODE_H + +#include +#include "rational.h" + +#define AV_TIMECODE_STR_SIZE 16 + +enum AVTimecodeFlag { + AV_TIMECODE_FLAG_DROPFRAME = 1<<0, ///< timecode is drop frame + AV_TIMECODE_FLAG_24HOURSMAX = 1<<1, ///< timecode wraps after 24 hours + AV_TIMECODE_FLAG_ALLOWNEGATIVE = 1<<2, ///< negative time values are allowed +}; + +typedef struct { + int start; ///< timecode frame start (first base frame number) + uint32_t flags; ///< flags such as drop frame, +24 hours support, ... + AVRational rate; ///< frame rate in rational form + unsigned fps; ///< frame per second; must be consistent with the rate field +} AVTimecode; + +/** + * Adjust frame number for NTSC drop frame time code. + * + * @param framenum frame number to adjust + * @param fps frame per second, 30 or 60 + * @return adjusted frame number + * @warning adjustment is only valid in NTSC 29.97 and 59.94 + */ +int av_timecode_adjust_ntsc_framenum2(int framenum, int fps); + +/** + * Convert frame number to SMPTE 12M binary representation. + * + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the SMPTE binary representation + * + * @note Frame number adjustment is automatically done in case of drop timecode, + * you do NOT have to call av_timecode_adjust_ntsc_framenum2(). + * @note The frame number is relative to tc->start. + * @note Color frame (CF), binary group flags (BGF) and biphase mark polarity + * correction (PC) bits are set to zero. + */ +uint32_t av_timecode_get_smpte_from_framenum(const AVTimecode *tc, int framenum); + +/** + * Load timecode string in buf. 
+ * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc timecode data correctly initialized + * @param framenum frame number + * @return the buf parameter + * + * @note Timecode representation can be a negative timecode and have more than + * 24 hours, but will only be honored if the flags are correctly set. + * @note The frame number is relative to tc->start. + */ +char *av_timecode_make_string(const AVTimecode *tc, char *buf, int framenum); + +/** + * Get the timecode string from the SMPTE timecode format. + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tcsmpte the 32-bit SMPTE timecode + * @param prevent_df prevent the use of a drop flag when it is known the DF bit + * is arbitrary + * @return the buf parameter + */ +char *av_timecode_make_smpte_tc_string(char *buf, uint32_t tcsmpte, int prevent_df); + +/** + * Get the timecode string from the 25-bit timecode format (MPEG GOP format). + * + * @param buf destination buffer, must be at least AV_TIMECODE_STR_SIZE long + * @param tc25bit the 25-bits timecode + * @return the buf parameter + */ +char *av_timecode_make_mpeg_tc_string(char *buf, uint32_t tc25bit); + +/** + * Init a timecode struct with the passed parameters. + * + * @param log_ctx a pointer to an arbitrary struct of which the first field + * is a pointer to an AVClass struct (used for av_log) + * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param flags miscellaneous flags such as drop frame, +24 hours, ... + * (see AVTimecodeFlag) + * @param frame_start the first frame number + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init(AVTimecode *tc, AVRational rate, int flags, int frame_start, void *log_ctx); + +/** + * Parse timecode representation (hh:mm:ss[:;.]ff). + * + * @param log_ctx a pointer to an arbitrary struct of which the first field is a + * pointer to an AVClass struct (used for av_log). 
+ * @param tc pointer to an allocated AVTimecode + * @param rate frame rate in rational form + * @param str timecode string which will determine the frame start + * @return 0 on success, AVERROR otherwise + */ +int av_timecode_init_from_string(AVTimecode *tc, AVRational rate, const char *str, void *log_ctx); + +/** + * Check if the timecode feature is available for the given frame rate + * + * @return 0 if supported, <0 otherwise + */ +int av_timecode_check_frame_rate(AVRational rate); + +#endif /* AVUTIL_TIMECODE_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/timestamp.h b/third-party/FFmpeg-iOS/include/libavutil/timestamp.h new file mode 100644 index 0000000000..f010a7ee38 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/timestamp.h @@ -0,0 +1,78 @@ +/* + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * timestamp utils, mostly useful for debugging/logging purposes + */ + +#ifndef AVUTIL_TIMESTAMP_H +#define AVUTIL_TIMESTAMP_H + +#include "common.h" + +#if defined(__cplusplus) && !defined(__STDC_FORMAT_MACROS) && !defined(PRId64) +#error missing -D__STDC_FORMAT_MACROS / #define __STDC_FORMAT_MACROS +#endif + +#define AV_TS_MAX_STRING_SIZE 32 + +/** + * Fill the provided buffer with a string containing a timestamp + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @return the buffer in input + */ +static inline char *av_ts_make_string(char *buf, int64_t ts) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%"PRId64, ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. + */ +#define av_ts2str(ts) av_ts_make_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts) + +/** + * Fill the provided buffer with a string containing a timestamp time + * representation. + * + * @param buf a buffer with size in bytes of at least AV_TS_MAX_STRING_SIZE + * @param ts the timestamp to represent + * @param tb the timebase of the timestamp + * @return the buffer in input + */ +static inline char *av_ts_make_time_string(char *buf, int64_t ts, AVRational *tb) +{ + if (ts == AV_NOPTS_VALUE) snprintf(buf, AV_TS_MAX_STRING_SIZE, "NOPTS"); + else snprintf(buf, AV_TS_MAX_STRING_SIZE, "%.6g", av_q2d(*tb) * ts); + return buf; +} + +/** + * Convenience macro, the return value should be used only directly in + * function arguments but never stand-alone. 
+ */ +#define av_ts2timestr(ts, tb) av_ts_make_time_string((char[AV_TS_MAX_STRING_SIZE]){0}, ts, tb) + +#endif /* AVUTIL_TIMESTAMP_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/tree.h b/third-party/FFmpeg-iOS/include/libavutil/tree.h new file mode 100644 index 0000000000..9a9e11b92c --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/tree.h @@ -0,0 +1,138 @@ +/* + * copyright (c) 2006 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file + * A tree container. + * @author Michael Niedermayer + */ + +#ifndef AVUTIL_TREE_H +#define AVUTIL_TREE_H + +#include "attributes.h" +#include "version.h" + +/** + * @addtogroup lavu_tree AVTree + * @ingroup lavu_data + * + * Low-complexity tree container + * + * Insertion, removal, finding equal, largest which is smaller than and + * smallest which is larger than, all have O(log n) worst-case complexity. + * @{ + */ + + +struct AVTreeNode; +extern const int av_tree_node_size; + +/** + * Allocate an AVTreeNode. + */ +struct AVTreeNode *av_tree_node_alloc(void); + +/** + * Find an element. + * @param root a pointer to the root node of the tree + * @param next If next is not NULL, then next[0] will contain the previous + * element and next[1] the next element. 
If either does not exist, + * then the corresponding entry in next is unchanged. + * @param cmp compare function used to compare elements in the tree, + * API identical to that of Standard C's qsort + * It is guranteed that the first and only the first argument to cmp() + * will be the key parameter to av_tree_find(), thus it could if the + * user wants, be a different type (like an opaque context). + * @return An element with cmp(key, elem) == 0 or NULL if no such element + * exists in the tree. + */ +void *av_tree_find(const struct AVTreeNode *root, void *key, + int (*cmp)(const void *key, const void *b), void *next[2]); + +/** + * Insert or remove an element. + * + * If *next is NULL, then the supplied element will be removed if it exists. + * If *next is non-NULL, then the supplied element will be inserted, unless + * it already exists in the tree. + * + * @param rootp A pointer to a pointer to the root node of the tree; note that + * the root node can change during insertions, this is required + * to keep the tree balanced. + * @param key pointer to the element key to insert in the tree + * @param next Used to allocate and free AVTreeNodes. For insertion the user + * must set it to an allocated and zeroed object of at least + * av_tree_node_size bytes size. av_tree_insert() will set it to + * NULL if it has been consumed. + * For deleting elements *next is set to NULL by the user and + * av_tree_insert() will set it to the AVTreeNode which was + * used for the removed element. + * This allows the use of flat arrays, which have + * lower overhead compared to many malloced elements. 
+ * You might want to define a function like: + * @code + * void *tree_insert(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b), + * AVTreeNode **next) + * { + * if (!*next) + * *next = av_mallocz(av_tree_node_size); + * return av_tree_insert(rootp, key, cmp, next); + * } + * void *tree_remove(struct AVTreeNode **rootp, void *key, + * int (*cmp)(void *key, const void *b, AVTreeNode **next)) + * { + * av_freep(next); + * return av_tree_insert(rootp, key, cmp, next); + * } + * @endcode + * @param cmp compare function used to compare elements in the tree, API identical + * to that of Standard C's qsort + * @return If no insertion happened, the found element; if an insertion or + * removal happened, then either key or NULL will be returned. + * Which one it is depends on the tree state and the implementation. You + * should make no assumptions that it's one or the other in the code. + */ +void *av_tree_insert(struct AVTreeNode **rootp, void *key, + int (*cmp)(const void *key, const void *b), + struct AVTreeNode **next); + +void av_tree_destroy(struct AVTreeNode *t); + +/** + * Apply enu(opaque, &elem) to all the elements in the tree in a given range. + * + * @param cmp a comparison function that returns < 0 for an element below the + * range, > 0 for an element above the range and == 0 for an + * element inside the range + * + * @note The cmp function should use the same ordering used to construct the + * tree. 
+ */ +void av_tree_enumerate(struct AVTreeNode *t, void *opaque, + int (*cmp)(void *opaque, void *elem), + int (*enu)(void *opaque, void *elem)); + +/** + * @} + */ + +#endif /* AVUTIL_TREE_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/twofish.h b/third-party/FFmpeg-iOS/include/libavutil/twofish.h new file mode 100644 index 0000000000..813cfecdf8 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/twofish.h @@ -0,0 +1,70 @@ +/* + * An implementation of the TwoFish algorithm + * Copyright (c) 2015 Supraja Meedinti + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_TWOFISH_H +#define AVUTIL_TWOFISH_H + +#include + + +/** + * @file + * @brief Public header for libavutil TWOFISH algorithm + * @defgroup lavu_twofish TWOFISH + * @ingroup lavu_crypto + * @{ + */ + +extern const int av_twofish_size; + +struct AVTWOFISH; + +/** + * Allocate an AVTWOFISH context + * To free the struct: av_free(ptr) + */ +struct AVTWOFISH *av_twofish_alloc(void); + +/** + * Initialize an AVTWOFISH context. 
+ * + * @param ctx an AVTWOFISH context + * @param key a key of size ranging from 1 to 32 bytes used for encryption/decryption + * @param key_bits number of keybits: 128, 192, 256 If less than the required, padded with zeroes to nearest valid value; return value is 0 if key_bits is 128/192/256, -1 if less than 0, 1 otherwise + */ +int av_twofish_init(struct AVTWOFISH *ctx, const uint8_t *key, int key_bits); + +/** + * Encrypt or decrypt a buffer using a previously initialized context + * + * @param ctx an AVTWOFISH context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 16 byte blocks + * @paran iv initialization vector for CBC mode, NULL for ECB mode + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_twofish_crypt(struct AVTWOFISH *ctx, uint8_t *dst, const uint8_t *src, int count, uint8_t* iv, int decrypt); + +/** + * @} + */ +#endif /* AVUTIL_TWOFISH_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/version.h b/third-party/FFmpeg-iOS/include/libavutil/version.h new file mode 100644 index 0000000000..07618fc0bc --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/version.h @@ -0,0 +1,128 @@ +/* + * copyright (c) 2003 Fabrice Bellard + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. 
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_VERSION_H +#define AVUTIL_VERSION_H + +#include "macros.h" + +/** + * @addtogroup version_utils + * + * Useful to check and match library version in order to maintain + * backward compatibility. + * + * @{ + */ + +#define AV_VERSION_INT(a, b, c) ((a)<<16 | (b)<<8 | (c)) +#define AV_VERSION_DOT(a, b, c) a ##.## b ##.## c +#define AV_VERSION(a, b, c) AV_VERSION_DOT(a, b, c) + +/** + * Extract version components from the full ::AV_VERSION_INT int as returned + * by functions like ::avformat_version() and ::avcodec_version() + */ +#define AV_VERSION_MAJOR(a) ((a) >> 16) +#define AV_VERSION_MINOR(a) (((a) & 0x00FF00) >> 8) +#define AV_VERSION_MICRO(a) ((a) & 0xFF) + +/** + * @} + */ + +/** + * @file + * @ingroup lavu + * Libavutil version macros + */ + +/** + * @defgroup lavu_ver Version and Build diagnostics + * + * Macros and function useful to check at compiletime and at runtime + * which version of libavutil is in use. + * + * @{ + */ + +#define LIBAVUTIL_VERSION_MAJOR 55 +#define LIBAVUTIL_VERSION_MINOR 28 +#define LIBAVUTIL_VERSION_MICRO 100 + +#define LIBAVUTIL_VERSION_INT AV_VERSION_INT(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_VERSION AV_VERSION(LIBAVUTIL_VERSION_MAJOR, \ + LIBAVUTIL_VERSION_MINOR, \ + LIBAVUTIL_VERSION_MICRO) +#define LIBAVUTIL_BUILD LIBAVUTIL_VERSION_INT + +#define LIBAVUTIL_IDENT "Lavu" AV_STRINGIFY(LIBAVUTIL_VERSION) + +/** + * @} + * + * @defgroup depr_guards Deprecation guards + * FF_API_* defines may be placed below to indicate public API that will be + * dropped at a future version bump. The defines themselves are not part of + * the public API and may change, break or disappear at any time. 
+ * + * @note, when bumping the major version it is recommended to manually + * disable each FF_API_* in its own commit instead of disabling them all + * at once through the bump. This improves the git bisect-ability of the change. + * + * @{ + */ + +#ifndef FF_API_VDPAU +#define FF_API_VDPAU (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_XVMC +#define FF_API_XVMC (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_OPT_TYPE_METADATA +#define FF_API_OPT_TYPE_METADATA (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_DLOG +#define FF_API_DLOG (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_VAAPI +#define FF_API_VAAPI (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_FRAME_QP +#define FF_API_FRAME_QP (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_PLUS1_MINUS1 +#define FF_API_PLUS1_MINUS1 (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_ERROR_FRAME +#define FF_API_ERROR_FRAME (LIBAVUTIL_VERSION_MAJOR < 56) +#endif +#ifndef FF_API_CRC_BIG_TABLE +#define FF_API_CRC_BIG_TABLE (LIBAVUTIL_VERSION_MAJOR < 56) +#endif + + +/** + * @} + */ + +#endif /* AVUTIL_VERSION_H */ diff --git a/third-party/FFmpeg-iOS/include/libavutil/xtea.h b/third-party/FFmpeg-iOS/include/libavutil/xtea.h new file mode 100644 index 0000000000..735427c109 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libavutil/xtea.h @@ -0,0 +1,94 @@ +/* + * A 32-bit implementation of the XTEA algorithm + * Copyright (c) 2012 Samuel Pitoiset + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_XTEA_H +#define AVUTIL_XTEA_H + +#include + +/** + * @file + * @brief Public header for libavutil XTEA algorithm + * @defgroup lavu_xtea XTEA + * @ingroup lavu_crypto + * @{ + */ + +typedef struct AVXTEA { + uint32_t key[16]; +} AVXTEA; + +/** + * Allocate an AVXTEA context. + */ +AVXTEA *av_xtea_alloc(void); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as big endian 32 bit numbers + */ +void av_xtea_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Initialize an AVXTEA context. + * + * @param ctx an AVXTEA context + * @param key a key of 16 bytes used for encryption/decryption, + * interpreted as little endian 32 bit numbers + */ +void av_xtea_le_init(struct AVXTEA *ctx, const uint8_t key[16]); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in big endian format. + * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * Encrypt or decrypt a buffer using a previously initialized context, + * in little endian format. 
+ * + * @param ctx an AVXTEA context + * @param dst destination array, can be equal to src + * @param src source array, can be equal to dst + * @param count number of 8 byte blocks + * @param iv initialization vector for CBC mode, if NULL then ECB will be used + * @param decrypt 0 for encryption, 1 for decryption + */ +void av_xtea_le_crypt(struct AVXTEA *ctx, uint8_t *dst, const uint8_t *src, + int count, uint8_t *iv, int decrypt); + +/** + * @} + */ + +#endif /* AVUTIL_XTEA_H */ diff --git a/third-party/FFmpeg-iOS/include/libswresample/swresample.h b/third-party/FFmpeg-iOS/include/libswresample/swresample.h new file mode 100644 index 0000000000..f461edc544 --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libswresample/swresample.h @@ -0,0 +1,553 @@ +/* + * Copyright (C) 2011-2013 Michael Niedermayer (michaelni@gmx.at) + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_SWRESAMPLE_H +#define SWRESAMPLE_SWRESAMPLE_H + +/** + * @file + * @ingroup lswr + * libswresample public header + */ + +/** + * @defgroup lswr Libswresample + * @{ + * + * Libswresample (lswr) is a library that handles audio resampling, sample + * format conversion and mixing. 
+ * + * Interaction with lswr is done through SwrContext, which is + * allocated with swr_alloc() or swr_alloc_set_opts(). It is opaque, so all parameters + * must be set with the @ref avoptions API. + * + * The first thing you will need to do in order to use lswr is to allocate + * SwrContext. This can be done with swr_alloc() or swr_alloc_set_opts(). If you + * are using the former, you must set options through the @ref avoptions API. + * The latter function provides the same feature, but it allows you to set some + * common options in the same statement. + * + * For example the following code will setup conversion from planar float sample + * format to interleaved signed 16-bit integer, downsampling from 48kHz to + * 44.1kHz and downmixing from 5.1 channels to stereo (using the default mixing + * matrix). This is using the swr_alloc() function. + * @code + * SwrContext *swr = swr_alloc(); + * av_opt_set_channel_layout(swr, "in_channel_layout", AV_CH_LAYOUT_5POINT1, 0); + * av_opt_set_channel_layout(swr, "out_channel_layout", AV_CH_LAYOUT_STEREO, 0); + * av_opt_set_int(swr, "in_sample_rate", 48000, 0); + * av_opt_set_int(swr, "out_sample_rate", 44100, 0); + * av_opt_set_sample_fmt(swr, "in_sample_fmt", AV_SAMPLE_FMT_FLTP, 0); + * av_opt_set_sample_fmt(swr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0); + * @endcode + * + * The same job can be done using swr_alloc_set_opts() as well: + * @code + * SwrContext *swr = swr_alloc_set_opts(NULL, // we're allocating a new context + * AV_CH_LAYOUT_STEREO, // out_ch_layout + * AV_SAMPLE_FMT_S16, // out_sample_fmt + * 44100, // out_sample_rate + * AV_CH_LAYOUT_5POINT1, // in_ch_layout + * AV_SAMPLE_FMT_FLTP, // in_sample_fmt + * 48000, // in_sample_rate + * 0, // log_offset + * NULL); // log_ctx + * @endcode + * + * Once all values have been set, it must be initialized with swr_init(). 
If + * you need to change the conversion parameters, you can change the parameters + * using @ref AVOptions, as described above in the first example; or by using + * swr_alloc_set_opts(), but with the first argument the allocated context. + * You must then call swr_init() again. + * + * The conversion itself is done by repeatedly calling swr_convert(). + * Note that the samples may get buffered in swr if you provide insufficient + * output space or if sample rate conversion is done, which requires "future" + * samples. Samples that do not require future input can be retrieved at any + * time by using swr_convert() (in_count can be set to 0). + * At the end of conversion the resampling buffer can be flushed by calling + * swr_convert() with NULL in and 0 in_count. + * + * The samples used in the conversion process can be managed with the libavutil + * @ref lavu_sampmanip "samples manipulation" API, including av_samples_alloc() + * function used in the following example. + * + * The delay between input and output, can at any time be found by using + * swr_get_delay(). + * + * The following code demonstrates the conversion loop assuming the parameters + * from above and caller-defined functions get_input() and handle_output(): + * @code + * uint8_t **input; + * int in_samples; + * + * while (get_input(&input, &in_samples)) { + * uint8_t *output; + * int out_samples = av_rescale_rnd(swr_get_delay(swr, 48000) + + * in_samples, 44100, 48000, AV_ROUND_UP); + * av_samples_alloc(&output, NULL, 2, out_samples, + * AV_SAMPLE_FMT_S16, 0); + * out_samples = swr_convert(swr, &output, out_samples, + * input, in_samples); + * handle_output(output, out_samples); + * av_freep(&output); + * } + * @endcode + * + * When the conversion is finished, the conversion + * context and everything associated with it must be freed with swr_free(). + * A swr_close() function is also available, but it exists mainly for + * compatibility with libavresample, and is not required to be called. 
+ * + * There will be no memory leak if the data is not completely flushed before + * swr_free(). + */ + +#include +#include "../libavutil/frame.h" +#include "../libavutil/samplefmt.h" + +#include "../libswresample/version.h" + +#if LIBSWRESAMPLE_VERSION_MAJOR < 1 +#define SWR_CH_MAX 32 ///< Maximum number of channels +#endif + +/** + * @name Option constants + * These constants are used for the @ref avoptions interface for lswr. + * @{ + * + */ + +#define SWR_FLAG_RESAMPLE 1 ///< Force resampling even if equal sample rate +//TODO use int resample ? +//long term TODO can we enable this dynamically? + +/** Dithering algorithms */ +enum SwrDitherType { + SWR_DITHER_NONE = 0, + SWR_DITHER_RECTANGULAR, + SWR_DITHER_TRIANGULAR, + SWR_DITHER_TRIANGULAR_HIGHPASS, + + SWR_DITHER_NS = 64, ///< not part of API/ABI + SWR_DITHER_NS_LIPSHITZ, + SWR_DITHER_NS_F_WEIGHTED, + SWR_DITHER_NS_MODIFIED_E_WEIGHTED, + SWR_DITHER_NS_IMPROVED_E_WEIGHTED, + SWR_DITHER_NS_SHIBATA, + SWR_DITHER_NS_LOW_SHIBATA, + SWR_DITHER_NS_HIGH_SHIBATA, + SWR_DITHER_NB, ///< not part of API/ABI +}; + +/** Resampling Engines */ +enum SwrEngine { + SWR_ENGINE_SWR, /**< SW Resampler */ + SWR_ENGINE_SOXR, /**< SoX Resampler */ + SWR_ENGINE_NB, ///< not part of API/ABI +}; + +/** Resampling Filter Types */ +enum SwrFilterType { + SWR_FILTER_TYPE_CUBIC, /**< Cubic */ + SWR_FILTER_TYPE_BLACKMAN_NUTTALL, /**< Blackman Nuttall windowed sinc */ + SWR_FILTER_TYPE_KAISER, /**< Kaiser windowed sinc */ +}; + +/** + * @} + */ + +/** + * The libswresample context. Unlike libavcodec and libavformat, this structure + * is opaque. This means that if you would like to set options, you must use + * the @ref avoptions API and cannot directly set values to members of the + * structure. + */ +typedef struct SwrContext SwrContext; + +/** + * Get the AVClass for SwrContext. It can be used in combination with + * AV_OPT_SEARCH_FAKE_OBJ for examining options. + * + * @see av_opt_find(). 
+ * @return the AVClass of SwrContext + */ +const AVClass *swr_get_class(void); + +/** + * @name SwrContext constructor functions + * @{ + */ + +/** + * Allocate SwrContext. + * + * If you use this function you will need to set the parameters (manually or + * with swr_alloc_set_opts()) before calling swr_init(). + * + * @see swr_alloc_set_opts(), swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc(void); + +/** + * Initialize context after user parameters have been set. + * @note The context must be configured using the AVOption API. + * + * @see av_opt_set_int() + * @see av_opt_set_dict() + * + * @param[in,out] s Swr context to initialize + * @return AVERROR error code in case of failure. + */ +int swr_init(struct SwrContext *s); + +/** + * Check whether an swr context has been initialized or not. + * + * @param[in] s Swr context to check + * @see swr_init() + * @return positive if it has been initialized, 0 if not initialized + */ +int swr_is_initialized(struct SwrContext *s); + +/** + * Allocate SwrContext if needed and set/reset common parameters. + * + * This function does not require s to be allocated with swr_alloc(). On the + * other hand, swr_alloc() can use swr_alloc_set_opts() to set the parameters + * on the allocated context. + * + * @param s existing Swr context if available, or NULL if not + * @param out_ch_layout output channel layout (AV_CH_LAYOUT_*) + * @param out_sample_fmt output sample format (AV_SAMPLE_FMT_*). + * @param out_sample_rate output sample rate (frequency in Hz) + * @param in_ch_layout input channel layout (AV_CH_LAYOUT_*) + * @param in_sample_fmt input sample format (AV_SAMPLE_FMT_*). 
+ * @param in_sample_rate input sample rate (frequency in Hz) + * @param log_offset logging level offset + * @param log_ctx parent logging context, can be NULL + * + * @see swr_init(), swr_free() + * @return NULL on error, allocated context otherwise + */ +struct SwrContext *swr_alloc_set_opts(struct SwrContext *s, + int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate, + int64_t in_ch_layout, enum AVSampleFormat in_sample_fmt, int in_sample_rate, + int log_offset, void *log_ctx); + +/** + * @} + * + * @name SwrContext destructor functions + * @{ + */ + +/** + * Free the given SwrContext and set the pointer to NULL. + * + * @param[in] s a pointer to a pointer to Swr context + */ +void swr_free(struct SwrContext **s); + +/** + * Closes the context so that swr_is_initialized() returns 0. + * + * The context can be brought back to life by running swr_init(), + * swr_init() can also be used without swr_close(). + * This function is mainly provided for simplifying the usecase + * where one tries to support libavresample and libswresample. + * + * @param[in,out] s Swr context to be closed + */ +void swr_close(struct SwrContext *s); + +/** + * @} + * + * @name Core conversion functions + * @{ + */ + +/** Convert audio. + * + * in and in_count can be set to 0 to flush the last few samples out at the + * end. + * + * If more input is provided than output space, then the input will be buffered. + * You can avoid this buffering by using swr_get_out_samples() to retrieve an + * upper bound on the required number of output samples for the given number of + * input samples. Conversion will run directly without copying whenever possible. 
+ * + * @param s allocated Swr context, with parameters set + * @param out output buffers, only the first one need be set in case of packed audio + * @param out_count amount of space available for output in samples per channel + * @param in input buffers, only the first one need to be set in case of packed audio + * @param in_count number of input samples available in one channel + * + * @return number of samples output per channel, negative value on error + */ +int swr_convert(struct SwrContext *s, uint8_t **out, int out_count, + const uint8_t **in , int in_count); + +/** + * Convert the next timestamp from input to output + * timestamps are in 1/(in_sample_rate * out_sample_rate) units. + * + * @note There are 2 slightly differently behaving modes. + * @li When automatic timestamp compensation is not used, (min_compensation >= FLT_MAX) + * in this case timestamps will be passed through with delays compensated + * @li When automatic timestamp compensation is used, (min_compensation < FLT_MAX) + * in this case the output timestamps will match output sample numbers. + * See ffmpeg-resampler(1) for the two modes of compensation. + * + * @param s[in] initialized Swr context + * @param pts[in] timestamp for the next input sample, INT64_MIN if unknown + * @see swr_set_compensation(), swr_drop_output(), and swr_inject_silence() are + * functions used internally for timestamp compensation. + * @return the output timestamp for the next output sample + */ +int64_t swr_next_pts(struct SwrContext *s, int64_t pts); + +/** + * @} + * + * @name Low-level option setting functions + * These functions provide a means to set low-level options that is not possible + * with the AVOption API. + * @{ + */ + +/** + * Activate resampling compensation ("soft" compensation). This function is + * internally called when needed in swr_next_pts(). + * + * @param[in,out] s allocated Swr context.
If it is not initialized, + * or SWR_FLAG_RESAMPLE is not set, swr_init() is + * called with the flag set. + * @param[in] sample_delta delta in PTS per sample + * @param[in] compensation_distance number of samples to compensate for + * @return >= 0 on success, AVERROR error codes if: + * @li @c s is NULL, + * @li @c compensation_distance is less than 0, + * @li @c compensation_distance is 0 but sample_delta is not, + * @li compensation unsupported by resampler, or + * @li swr_init() fails when called. + */ +int swr_set_compensation(struct SwrContext *s, int sample_delta, int compensation_distance); + +/** + * Set a customized input channel mapping. + * + * @param[in,out] s allocated Swr context, not yet initialized + * @param[in] channel_map customized input channel mapping (array of channel + * indexes, -1 for a muted channel) + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int swr_set_channel_mapping(struct SwrContext *s, const int *channel_map); + +/** + * Set a customized remix matrix. + * + * @param s allocated Swr context, not yet initialized + * @param matrix remix coefficients; matrix[i + stride * o] is + * the weight of input channel i in output channel o + * @param stride offset between lines of the matrix + * @return >= 0 on success, or AVERROR error code in case of failure. + */ +int swr_set_matrix(struct SwrContext *s, const double *matrix, int stride); + +/** + * @} + * + * @name Sample handling functions + * @{ + */ + +/** + * Drops the specified number of output samples. + * + * This function, along with swr_inject_silence(), is called by swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be dropped + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int swr_drop_output(struct SwrContext *s, int count); + +/** + * Injects the specified number of silence samples. 
+ * + * This function, along with swr_drop_output(), is called by swr_next_pts() + * if needed for "hard" compensation. + * + * @param s allocated Swr context + * @param count number of samples to be injected + * + * @return >= 0 on success, or a negative AVERROR code on failure + */ +int swr_inject_silence(struct SwrContext *s, int count); + +/** + * Gets the delay the next input sample will experience relative to the next output sample. + * + * Swresample can buffer data if more input has been provided than available + * output space, also converting between sample rates needs a delay. + * This function returns the sum of all such delays. + * The exact delay is not necessarily an integer value in either input or + * output sample rate. Especially when downsampling by a large value, the + * output sample rate may be a poor choice to represent the delay, similarly + * for upsampling and the input sample rate. + * + * @param s swr context + * @param base timebase in which the returned delay will be: + * @li if it's set to 1 the returned delay is in seconds + * @li if it's set to 1000 the returned delay is in milliseconds + * @li if it's set to the input sample rate then the returned + * delay is in input samples + * @li if it's set to the output sample rate then the returned + * delay is in output samples + * @li if it's the least common multiple of in_sample_rate and + * out_sample_rate then an exact rounding-free delay will be + * returned + * @returns the delay in 1 / @c base units. + */ +int64_t swr_get_delay(struct SwrContext *s, int64_t base); + +/** + * Find an upper bound on the number of samples that the next swr_convert + * call will output, if called with in_samples of input samples. This + * depends on the internal state, and anything changing the internal state + * (like further swr_convert() calls) may change the number of samples + * swr_get_out_samples() returns for the same number of input samples.
+ * + * @param in_samples number of input samples. + * @note any call to swr_inject_silence(), swr_convert(), swr_next_pts() + * or swr_set_compensation() invalidates this limit + * @note it is recommended to pass the correct available buffer size + * to all functions like swr_convert() even if swr_get_out_samples() + * indicates that less would be used. + * @returns an upper bound on the number of samples that the next swr_convert + * will output or a negative value to indicate an error + */ +int swr_get_out_samples(struct SwrContext *s, int in_samples); + +/** + * @} + * + * @name Configuration accessors + * @{ + */ + +/** + * Return the @ref LIBSWRESAMPLE_VERSION_INT constant. + * + * This is useful to check if the build-time libswresample has the same version + * as the run-time one. + * + * @returns the unsigned int-typed version + */ +unsigned swresample_version(void); + +/** + * Return the swr build-time configuration. + * + * @returns the build-time @c ./configure flags + */ +const char *swresample_configuration(void); + +/** + * Return the swr license. + * + * @returns the license of libswresample, determined at build-time + */ +const char *swresample_license(void); + +/** + * @} + * + * @name AVFrame based API + * @{ + */ + +/** + * Convert the samples in the input AVFrame and write them to the output AVFrame. + * + * Input and output AVFrames must have channel_layout, sample_rate and format set. + * + * If the output AVFrame does not have the data pointers allocated the nb_samples + * field will be set using av_frame_get_buffer() + * is called to allocate the frame. + * + * The output AVFrame can be NULL or have fewer allocated samples than required. + * In this case, any remaining samples not written to the output will be added + * to an internal FIFO buffer, to be returned at the next call to this function + * or to swr_convert(). + * + * If converting sample rate, there may be data remaining in the internal + * resampling delay buffer. 
swr_get_delay() tells the number of + * remaining samples. To get this data as output, call this function or + * swr_convert() with NULL input. + * + * If the SwrContext configuration does not match the output and + * input AVFrame settings the conversion does not take place and depending on + * which AVFrame is not matching AVERROR_OUTPUT_CHANGED, AVERROR_INPUT_CHANGED + * or the result of a bitwise-OR of them is returned. + * + * @see swr_delay() + * @see swr_convert() + * @see swr_get_delay() + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure or nonmatching + * configuration. + */ +int swr_convert_frame(SwrContext *swr, + AVFrame *output, const AVFrame *input); + +/** + * Configure or reconfigure the SwrContext using the information + * provided by the AVFrames. + * + * The original resampling context is reset even on failure. + * The function calls swr_close() internally if the context is open. + * + * @see swr_close(); + * + * @param swr audio resample context + * @param output output AVFrame + * @param input input AVFrame + * @return 0 on success, AVERROR on failure. + */ +int swr_config_frame(SwrContext *swr, const AVFrame *out, const AVFrame *in); + +/** + * @} + * @} + */ + +#endif /* SWRESAMPLE_SWRESAMPLE_H */ diff --git a/third-party/FFmpeg-iOS/include/libswresample/version.h b/third-party/FFmpeg-iOS/include/libswresample/version.h new file mode 100644 index 0000000000..703d76f19d --- /dev/null +++ b/third-party/FFmpeg-iOS/include/libswresample/version.h @@ -0,0 +1,45 @@ +/* + * Version macros. + * + * This file is part of libswresample + * + * libswresample is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. 
+ * + * libswresample is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with libswresample; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef SWRESAMPLE_VERSION_H +#define SWRESAMPLE_VERSION_H + +/** + * @file + * Libswresample version macros + */ + +#include "../libavutil/avutil.h" + +#define LIBSWRESAMPLE_VERSION_MAJOR 2 +#define LIBSWRESAMPLE_VERSION_MINOR 1 +#define LIBSWRESAMPLE_VERSION_MICRO 100 + +#define LIBSWRESAMPLE_VERSION_INT AV_VERSION_INT(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_VERSION AV_VERSION(LIBSWRESAMPLE_VERSION_MAJOR, \ + LIBSWRESAMPLE_VERSION_MINOR, \ + LIBSWRESAMPLE_VERSION_MICRO) +#define LIBSWRESAMPLE_BUILD LIBSWRESAMPLE_VERSION_INT + +#define LIBSWRESAMPLE_IDENT "SwR" AV_STRINGIFY(LIBSWRESAMPLE_VERSION) + +#endif /* SWRESAMPLE_VERSION_H */ diff --git a/third-party/FFmpeg-iOS/lib/libavcodec.a b/third-party/FFmpeg-iOS/lib/libavcodec.a new file mode 100644 index 0000000000..e7c6e75d19 Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libavcodec.a differ diff --git a/third-party/FFmpeg-iOS/lib/libavformat.a b/third-party/FFmpeg-iOS/lib/libavformat.a new file mode 100644 index 0000000000..17f3aae87e Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libavformat.a differ diff --git a/third-party/FFmpeg-iOS/lib/libavutil.a b/third-party/FFmpeg-iOS/lib/libavutil.a new file mode 100644 index 0000000000..f4c347ce91 Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libavutil.a differ diff --git a/third-party/FFmpeg-iOS/lib/libswresample.a b/third-party/FFmpeg-iOS/lib/libswresample.a new file mode 
100644 index 0000000000..c022d0be8f Binary files /dev/null and b/third-party/FFmpeg-iOS/lib/libswresample.a differ diff --git a/third-party/libwebp/include/webp/decode.h b/third-party/libwebp/include/webp/decode.h new file mode 100644 index 0000000000..b00d15b346 --- /dev/null +++ b/third-party/libwebp/include/webp/decode.h @@ -0,0 +1,491 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Main decoding functions for WebP images. +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_WEBP_DECODE_H_ +#define WEBP_WEBP_DECODE_H_ + +#include "./types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_DECODER_ABI_VERSION 0x0208 // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum VP8StatusCode VP8StatusCode; +// typedef enum WEBP_CSP_MODE WEBP_CSP_MODE; +typedef struct WebPRGBABuffer WebPRGBABuffer; +typedef struct WebPYUVABuffer WebPYUVABuffer; +typedef struct WebPDecBuffer WebPDecBuffer; +typedef struct WebPIDecoder WebPIDecoder; +typedef struct WebPBitstreamFeatures WebPBitstreamFeatures; +typedef struct WebPDecoderOptions WebPDecoderOptions; +typedef struct WebPDecoderConfig WebPDecoderConfig; + +// Return the decoder's version number, packed in hexadecimal using 8bits for +// each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN(int) WebPGetDecoderVersion(void); + +// Retrieve basic header information: width, height. 
+// This function will also validate the header and return 0 in +// case of formatting error. +// Pointers 'width' and 'height' can be passed NULL if deemed irrelevant. +WEBP_EXTERN(int) WebPGetInfo(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Decodes WebP images pointed to by 'data' and returns RGBA samples, along +// with the dimensions in *width and *height. The ordering of samples in +// memory is R, G, B, A, R, G, B, A... in scan order (endian-independent). +// The returned pointer should be deleted calling WebPFree(). +// Returns NULL in case of error. +WEBP_EXTERN(uint8_t*) WebPDecodeRGBA(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGBA, but returning A, R, G, B, A, R, G, B... ordered data. +WEBP_EXTERN(uint8_t*) WebPDecodeARGB(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGBA, but returning B, G, R, A, B, G, R, A... ordered data. +WEBP_EXTERN(uint8_t*) WebPDecodeBGRA(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGBA, but returning R, G, B, R, G, B... ordered data. +// If the bitstream contains transparency, it is ignored. +WEBP_EXTERN(uint8_t*) WebPDecodeRGB(const uint8_t* data, size_t data_size, + int* width, int* height); + +// Same as WebPDecodeRGB, but returning B, G, R, B, G, R... ordered data. +WEBP_EXTERN(uint8_t*) WebPDecodeBGR(const uint8_t* data, size_t data_size, + int* width, int* height); + + +// Decode WebP images pointed to by 'data' to Y'UV format(*). The pointer +// returned is the Y samples buffer. Upon return, *u and *v will point to +// the U and V chroma data. These U and V buffers need NOT be passed to +// WebPFree(), unlike the returned Y luma one. The dimension of the U and V +// planes are both (*width + 1) / 2 and (*height + 1)/ 2. +// Upon return, the Y buffer has a stride returned as '*stride', while U and V +// have a common stride returned as '*uv_stride'. 
+// Return NULL in case of error. +// (*) Also named Y'CbCr. See: http://en.wikipedia.org/wiki/YCbCr +WEBP_EXTERN(uint8_t*) WebPDecodeYUV(const uint8_t* data, size_t data_size, + int* width, int* height, + uint8_t** u, uint8_t** v, + int* stride, int* uv_stride); + +// Releases memory returned by the WebPDecode*() functions above. +WEBP_EXTERN(void) WebPFree(void* ptr); + +// These five functions are variants of the above ones, that decode the image +// directly into a pre-allocated buffer 'output_buffer'. The maximum storage +// available in this buffer is indicated by 'output_buffer_size'. If this +// storage is not sufficient (or an error occurred), NULL is returned. +// Otherwise, output_buffer is returned, for convenience. +// The parameter 'output_stride' specifies the distance (in bytes) +// between scanlines. Hence, output_buffer_size is expected to be at least +// output_stride x picture-height. +WEBP_EXTERN(uint8_t*) WebPDecodeRGBAInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_EXTERN(uint8_t*) WebPDecodeARGBInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_EXTERN(uint8_t*) WebPDecodeBGRAInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// RGB and BGR variants. Here too the transparency information, if present, +// will be dropped and ignored. +WEBP_EXTERN(uint8_t*) WebPDecodeRGBInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); +WEBP_EXTERN(uint8_t*) WebPDecodeBGRInto( + const uint8_t* data, size_t data_size, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// WebPDecodeYUVInto() is a variant of WebPDecodeYUV() that operates directly +// into pre-allocated luma/chroma plane buffers. 
This function requires the +// strides to be passed: one for the luma plane and one for each of the +// chroma ones. The size of each plane buffer is passed as 'luma_size', +// 'u_size' and 'v_size' respectively. +// Pointer to the luma plane ('*luma') is returned or NULL if an error occurred +// during decoding (or because some buffers were found to be too small). +WEBP_EXTERN(uint8_t*) WebPDecodeYUVInto( + const uint8_t* data, size_t data_size, + uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride); + +//------------------------------------------------------------------------------ +// Output colorspaces and buffer + +// Colorspaces +// Note: the naming describes the byte-ordering of packed samples in memory. +// For instance, MODE_BGRA relates to samples ordered as B,G,R,A,B,G,R,A,... +// Non-capital names (e.g.:MODE_Argb) relates to pre-multiplied RGB channels. +// RGBA-4444 and RGB-565 colorspaces are represented by following byte-order: +// RGBA-4444: [r3 r2 r1 r0 g3 g2 g1 g0], [b3 b2 b1 b0 a3 a2 a1 a0], ... +// RGB-565: [r4 r3 r2 r1 r0 g5 g4 g3], [g2 g1 g0 b4 b3 b2 b1 b0], ... +// In the case WEBP_SWAP_16BITS_CSP is defined, the bytes are swapped for +// these two modes: +// RGBA-4444: [b3 b2 b1 b0 a3 a2 a1 a0], [r3 r2 r1 r0 g3 g2 g1 g0], ... +// RGB-565: [g2 g1 g0 b4 b3 b2 b1 b0], [r4 r3 r2 r1 r0 g5 g4 g3], ... + +typedef enum WEBP_CSP_MODE { + MODE_RGB = 0, MODE_RGBA = 1, + MODE_BGR = 2, MODE_BGRA = 3, + MODE_ARGB = 4, MODE_RGBA_4444 = 5, + MODE_RGB_565 = 6, + // RGB-premultiplied transparent modes (alpha value is preserved) + MODE_rgbA = 7, + MODE_bgrA = 8, + MODE_Argb = 9, + MODE_rgbA_4444 = 10, + // YUV modes must come after RGB ones. 
+ MODE_YUV = 11, MODE_YUVA = 12, // yuv 4:2:0 + MODE_LAST = 13 +} WEBP_CSP_MODE; + +// Some useful macros: +static WEBP_INLINE int WebPIsPremultipliedMode(WEBP_CSP_MODE mode) { + return (mode == MODE_rgbA || mode == MODE_bgrA || mode == MODE_Argb || + mode == MODE_rgbA_4444); +} + +static WEBP_INLINE int WebPIsAlphaMode(WEBP_CSP_MODE mode) { + return (mode == MODE_RGBA || mode == MODE_BGRA || mode == MODE_ARGB || + mode == MODE_RGBA_4444 || mode == MODE_YUVA || + WebPIsPremultipliedMode(mode)); +} + +static WEBP_INLINE int WebPIsRGBMode(WEBP_CSP_MODE mode) { + return (mode < MODE_YUV); +} + +//------------------------------------------------------------------------------ +// WebPDecBuffer: Generic structure for describing the output sample buffer. + +struct WebPRGBABuffer { // view as RGBA + uint8_t* rgba; // pointer to RGBA samples + int stride; // stride in bytes from one scanline to the next. + size_t size; // total size of the *rgba buffer. +}; + +struct WebPYUVABuffer { // view as YUVA + uint8_t* y, *u, *v, *a; // pointer to luma, chroma U/V, alpha samples + int y_stride; // luma stride + int u_stride, v_stride; // chroma strides + int a_stride; // alpha stride + size_t y_size; // luma plane size + size_t u_size, v_size; // chroma planes size + size_t a_size; // alpha-plane size +}; + +// Output buffer +struct WebPDecBuffer { + WEBP_CSP_MODE colorspace; // Colorspace. + int width, height; // Dimensions. + int is_external_memory; // If non-zero, 'internal_memory' pointer is not + // used. If value is '2' or more, the external + // memory is considered 'slow' and multiple + // read/write will be avoided. + union { + WebPRGBABuffer RGBA; + WebPYUVABuffer YUVA; + } u; // Nameless union of buffer parameters. + uint32_t pad[4]; // padding for later use + + uint8_t* private_memory; // Internally allocated memory (only when + // is_external_memory is 0). Should not be used + // externally, but accessed via the buffer union. 
+}; + +// Internal, version-checked, entry point +WEBP_EXTERN(int) WebPInitDecBufferInternal(WebPDecBuffer*, int); + +// Initialize the structure as empty. Must be called before any other use. +// Returns false in case of version mismatch +static WEBP_INLINE int WebPInitDecBuffer(WebPDecBuffer* buffer) { + return WebPInitDecBufferInternal(buffer, WEBP_DECODER_ABI_VERSION); +} + +// Free any memory associated with the buffer. Must always be called last. +// Note: doesn't free the 'buffer' structure itself. +WEBP_EXTERN(void) WebPFreeDecBuffer(WebPDecBuffer* buffer); + +//------------------------------------------------------------------------------ +// Enumeration of the status codes + +typedef enum VP8StatusCode { + VP8_STATUS_OK = 0, + VP8_STATUS_OUT_OF_MEMORY, + VP8_STATUS_INVALID_PARAM, + VP8_STATUS_BITSTREAM_ERROR, + VP8_STATUS_UNSUPPORTED_FEATURE, + VP8_STATUS_SUSPENDED, + VP8_STATUS_USER_ABORT, + VP8_STATUS_NOT_ENOUGH_DATA +} VP8StatusCode; + +//------------------------------------------------------------------------------ +// Incremental decoding +// +// This API allows streamlined decoding of partial data. +// Picture can be incrementally decoded as data become available thanks to the +// WebPIDecoder object. This object can be left in a SUSPENDED state if the +// picture is only partially decoded, pending additional input. +// Code example: +// +// WebPInitDecBuffer(&buffer); +// buffer.colorspace = mode; +// ... +// WebPIDecoder* idec = WebPINewDecoder(&buffer); +// while (has_more_data) { +// // ... (get additional data) +// status = WebPIAppend(idec, new_data, new_data_size); +// if (status != VP8_STATUS_SUSPENDED || +// break; +// } +// +// // The above call decodes the current available buffer. +// // Part of the image can now be refreshed by calling to +// // WebPIDecGetRGB()/WebPIDecGetYUVA() etc. +// } +// WebPIDelete(idec); + +// Creates a new incremental decoder with the supplied buffer parameter. 
+// This output_buffer can be passed NULL, in which case a default output buffer +// is used (with MODE_RGB). Otherwise, an internal reference to 'output_buffer' +// is kept, which means that the lifespan of 'output_buffer' must be larger than +// that of the returned WebPIDecoder object. +// The supplied 'output_buffer' content MUST NOT be changed between calls to +// WebPIAppend() or WebPIUpdate() unless 'output_buffer.is_external_memory' is +// not set to 0. In such a case, it is allowed to modify the pointers, size and +// stride of output_buffer.u.RGBA or output_buffer.u.YUVA, provided they remain +// within valid bounds. +// All other fields of WebPDecBuffer MUST remain constant between calls. +// Returns NULL if the allocation failed. +WEBP_EXTERN(WebPIDecoder*) WebPINewDecoder(WebPDecBuffer* output_buffer); + +// This function allocates and initializes an incremental-decoder object, which +// will output the RGB/A samples specified by 'csp' into a preallocated +// buffer 'output_buffer'. The size of this buffer is at least +// 'output_buffer_size' and the stride (distance in bytes between two scanlines) +// is specified by 'output_stride'. +// Additionally, output_buffer can be passed NULL in which case the output +// buffer will be allocated automatically when the decoding starts. The +// colorspace 'csp' is taken into account for allocating this buffer. All other +// parameters are ignored. +// Returns NULL if the allocation failed, or if some parameters are invalid. +WEBP_EXTERN(WebPIDecoder*) WebPINewRGB( + WEBP_CSP_MODE csp, + uint8_t* output_buffer, size_t output_buffer_size, int output_stride); + +// This function allocates and initializes an incremental-decoder object, which +// will output the raw luma/chroma samples into a preallocated planes if +// supplied. The luma plane is specified by its pointer 'luma', its size +// 'luma_size' and its stride 'luma_stride'. 
Similarly, the chroma-u plane +// is specified by the 'u', 'u_size' and 'u_stride' parameters, and the chroma-v +// plane by 'v' and 'v_size'. And same for the alpha-plane. The 'a' pointer +// can be pass NULL in case one is not interested in the transparency plane. +// Conversely, 'luma' can be passed NULL if no preallocated planes are supplied. +// In this case, the output buffer will be automatically allocated (using +// MODE_YUVA) when decoding starts. All parameters are then ignored. +// Returns NULL if the allocation failed or if a parameter is invalid. +WEBP_EXTERN(WebPIDecoder*) WebPINewYUVA( + uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride, + uint8_t* a, size_t a_size, int a_stride); + +// Deprecated version of the above, without the alpha plane. +// Kept for backward compatibility. +WEBP_EXTERN(WebPIDecoder*) WebPINewYUV( + uint8_t* luma, size_t luma_size, int luma_stride, + uint8_t* u, size_t u_size, int u_stride, + uint8_t* v, size_t v_size, int v_stride); + +// Deletes the WebPIDecoder object and associated memory. Must always be called +// if WebPINewDecoder, WebPINewRGB or WebPINewYUV succeeded. +WEBP_EXTERN(void) WebPIDelete(WebPIDecoder* idec); + +// Copies and decodes the next available data. Returns VP8_STATUS_OK when +// the image is successfully decoded. Returns VP8_STATUS_SUSPENDED when more +// data is expected. Returns error in other cases. +WEBP_EXTERN(VP8StatusCode) WebPIAppend( + WebPIDecoder* idec, const uint8_t* data, size_t data_size); + +// A variant of the above function to be used when data buffer contains +// partial data from the beginning. In this case data buffer is not copied +// to the internal memory. +// Note that the value of the 'data' pointer can change between calls to +// WebPIUpdate, for instance when the data buffer is resized to fit larger data. 
+WEBP_EXTERN(VP8StatusCode) WebPIUpdate( + WebPIDecoder* idec, const uint8_t* data, size_t data_size); + +// Returns the RGB/A image decoded so far. Returns NULL if output params +// are not initialized yet. The RGB/A output type corresponds to the colorspace +// specified during call to WebPINewDecoder() or WebPINewRGB(). +// *last_y is the index of last decoded row in raster scan order. Some pointers +// (*last_y, *width etc.) can be NULL if corresponding information is not +// needed. +WEBP_EXTERN(uint8_t*) WebPIDecGetRGB( + const WebPIDecoder* idec, int* last_y, + int* width, int* height, int* stride); + +// Same as above function to get a YUVA image. Returns pointer to the luma +// plane or NULL in case of error. If there is no alpha information +// the alpha pointer '*a' will be returned NULL. +WEBP_EXTERN(uint8_t*) WebPIDecGetYUVA( + const WebPIDecoder* idec, int* last_y, + uint8_t** u, uint8_t** v, uint8_t** a, + int* width, int* height, int* stride, int* uv_stride, int* a_stride); + +// Deprecated alpha-less version of WebPIDecGetYUVA(): it will ignore the +// alpha information (if present). Kept for backward compatibility. +static WEBP_INLINE uint8_t* WebPIDecGetYUV( + const WebPIDecoder* idec, int* last_y, uint8_t** u, uint8_t** v, + int* width, int* height, int* stride, int* uv_stride) { + return WebPIDecGetYUVA(idec, last_y, u, v, NULL, width, height, + stride, uv_stride, NULL); +} + +// Generic call to retrieve information about the displayable area. +// If non NULL, the left/right/width/height pointers are filled with the visible +// rectangular area so far. +// Returns NULL in case the incremental decoder object is in an invalid state. +// Otherwise returns the pointer to the internal representation. This structure +// is read-only, tied to WebPIDecoder's lifespan and should not be modified. 
+WEBP_EXTERN(const WebPDecBuffer*) WebPIDecodedArea( + const WebPIDecoder* idec, int* left, int* top, int* width, int* height); + +//------------------------------------------------------------------------------ +// Advanced decoding parametrization +// +// Code sample for using the advanced decoding API +/* + // A) Init a configuration object + WebPDecoderConfig config; + CHECK(WebPInitDecoderConfig(&config)); + + // B) optional: retrieve the bitstream's features. + CHECK(WebPGetFeatures(data, data_size, &config.input) == VP8_STATUS_OK); + + // C) Adjust 'config', if needed + config.no_fancy_upsampling = 1; + config.output.colorspace = MODE_BGRA; + // etc. + + // Note that you can also make config.output point to an externally + // supplied memory buffer, provided it's big enough to store the decoded + // picture. Otherwise, config.output will just be used to allocate memory + // and store the decoded picture. + + // D) Decode! + CHECK(WebPDecode(data, data_size, &config) == VP8_STATUS_OK); + + // E) Decoded image is now in config.output (and config.output.u.RGBA) + + // F) Reclaim memory allocated in config's object. It's safe to call + // this function even if the memory is external and wasn't allocated + // by WebPDecode(). + WebPFreeDecBuffer(&config.output); +*/ + +// Features gathered from the bitstream +struct WebPBitstreamFeatures { + int width; // Width in pixels, as read from the bitstream. + int height; // Height in pixels, as read from the bitstream. + int has_alpha; // True if the bitstream contains an alpha channel. + int has_animation; // True if the bitstream is an animation. + int format; // 0 = undefined (/mixed), 1 = lossy, 2 = lossless + + uint32_t pad[5]; // padding for later use +}; + +// Internal, version-checked, entry point +WEBP_EXTERN(VP8StatusCode) WebPGetFeaturesInternal( + const uint8_t*, size_t, WebPBitstreamFeatures*, int); + +// Retrieve features from the bitstream. 
The *features structure is filled +// with information gathered from the bitstream. +// Returns VP8_STATUS_OK when the features are successfully retrieved. Returns +// VP8_STATUS_NOT_ENOUGH_DATA when more data is needed to retrieve the +// features from headers. Returns error in other cases. +static WEBP_INLINE VP8StatusCode WebPGetFeatures( + const uint8_t* data, size_t data_size, + WebPBitstreamFeatures* features) { + return WebPGetFeaturesInternal(data, data_size, features, + WEBP_DECODER_ABI_VERSION); +} + +// Decoding options +struct WebPDecoderOptions { + int bypass_filtering; // if true, skip the in-loop filtering + int no_fancy_upsampling; // if true, use faster pointwise upsampler + int use_cropping; // if true, cropping is applied _first_ + int crop_left, crop_top; // top-left position for cropping. + // Will be snapped to even values. + int crop_width, crop_height; // dimension of the cropping area + int use_scaling; // if true, scaling is applied _afterward_ + int scaled_width, scaled_height; // final resolution + int use_threads; // if true, use multi-threaded decoding + int dithering_strength; // dithering strength (0=Off, 100=full) + int flip; // flip output vertically + int alpha_dithering_strength; // alpha dithering strength in [0..100] + + uint32_t pad[5]; // padding for later use +}; + +// Main object storing the configuration for advanced decoding. +struct WebPDecoderConfig { + WebPBitstreamFeatures input; // Immutable bitstream features (optional) + WebPDecBuffer output; // Output buffer (can point to external mem) + WebPDecoderOptions options; // Decoding options +}; + +// Internal, version-checked, entry point +WEBP_EXTERN(int) WebPInitDecoderConfigInternal(WebPDecoderConfig*, int); + +// Initialize the configuration as empty. This function must always be +// called first, unless WebPGetFeatures() is to be called. +// Returns false in case of mismatched version. 
+static WEBP_INLINE int WebPInitDecoderConfig(WebPDecoderConfig* config) { + return WebPInitDecoderConfigInternal(config, WEBP_DECODER_ABI_VERSION); +} + +// Instantiate a new incremental decoder object with the requested +// configuration. The bitstream can be passed using 'data' and 'data_size' +// parameter, in which case the features will be parsed and stored into +// config->input. Otherwise, 'data' can be NULL and no parsing will occur. +// Note that 'config' can be NULL too, in which case a default configuration +// is used. +// The return WebPIDecoder object must always be deleted calling WebPIDelete(). +// Returns NULL in case of error (and config->status will then reflect +// the error condition). +WEBP_EXTERN(WebPIDecoder*) WebPIDecode(const uint8_t* data, size_t data_size, + WebPDecoderConfig* config); + +// Non-incremental version. This version decodes the full data at once, taking +// 'config' into account. Returns decoding status (which should be VP8_STATUS_OK +// if the decoding was successful). +WEBP_EXTERN(VP8StatusCode) WebPDecode(const uint8_t* data, size_t data_size, + WebPDecoderConfig* config); + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif /* WEBP_WEBP_DECODE_H_ */ diff --git a/third-party/libwebp/include/webp/encode.h b/third-party/libwebp/include/webp/encode.h new file mode 100644 index 0000000000..9291b7195c --- /dev/null +++ b/third-party/libwebp/include/webp/encode.h @@ -0,0 +1,527 @@ +// Copyright 2011 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. 
+// ----------------------------------------------------------------------------- +// +// WebP encoder: main interface +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_WEBP_ENCODE_H_ +#define WEBP_WEBP_ENCODE_H_ + +#include "./types.h" + +#ifdef __cplusplus +extern "C" { +#endif + +#define WEBP_ENCODER_ABI_VERSION 0x0209 // MAJOR(8b) + MINOR(8b) + +// Note: forward declaring enumerations is not allowed in (strict) C and C++, +// the types are left here for reference. +// typedef enum WebPImageHint WebPImageHint; +// typedef enum WebPEncCSP WebPEncCSP; +// typedef enum WebPPreset WebPPreset; +// typedef enum WebPEncodingError WebPEncodingError; +typedef struct WebPConfig WebPConfig; +typedef struct WebPPicture WebPPicture; // main structure for I/O +typedef struct WebPAuxStats WebPAuxStats; +typedef struct WebPMemoryWriter WebPMemoryWriter; + +// Return the encoder's version number, packed in hexadecimal using 8bits for +// each of major/minor/revision. E.g: v2.5.7 is 0x020507. +WEBP_EXTERN(int) WebPGetEncoderVersion(void); + +//------------------------------------------------------------------------------ +// One-stop-shop call! No questions asked: + +// Returns the size of the compressed data (pointed to by *output), or 0 if +// an error occurred. The compressed data must be released by the caller +// using the call 'WebPFree(*output)'. +// These functions compress using the lossy format, and the quality_factor +// can go from 0 (smaller output, lower quality) to 100 (best quality, +// larger output). 
+WEBP_EXTERN(size_t) WebPEncodeRGB(const uint8_t* rgb, + int width, int height, int stride, + float quality_factor, uint8_t** output); +WEBP_EXTERN(size_t) WebPEncodeBGR(const uint8_t* bgr, + int width, int height, int stride, + float quality_factor, uint8_t** output); +WEBP_EXTERN(size_t) WebPEncodeRGBA(const uint8_t* rgba, + int width, int height, int stride, + float quality_factor, uint8_t** output); +WEBP_EXTERN(size_t) WebPEncodeBGRA(const uint8_t* bgra, + int width, int height, int stride, + float quality_factor, uint8_t** output); + +// These functions are the equivalent of the above, but compressing in a +// lossless manner. Files are usually larger than lossy format, but will +// not suffer any compression loss. +WEBP_EXTERN(size_t) WebPEncodeLosslessRGB(const uint8_t* rgb, + int width, int height, int stride, + uint8_t** output); +WEBP_EXTERN(size_t) WebPEncodeLosslessBGR(const uint8_t* bgr, + int width, int height, int stride, + uint8_t** output); +WEBP_EXTERN(size_t) WebPEncodeLosslessRGBA(const uint8_t* rgba, + int width, int height, int stride, + uint8_t** output); +WEBP_EXTERN(size_t) WebPEncodeLosslessBGRA(const uint8_t* bgra, + int width, int height, int stride, + uint8_t** output); + +// Releases memory returned by the WebPEncode*() functions above. +WEBP_EXTERN(void) WebPFree(void* ptr); + +//------------------------------------------------------------------------------ +// Coding parameters + +// Image characteristics hint for the underlying encoder. +typedef enum WebPImageHint { + WEBP_HINT_DEFAULT = 0, // default preset. + WEBP_HINT_PICTURE, // digital picture, like portrait, inner shot + WEBP_HINT_PHOTO, // outdoor photograph, with natural lighting + WEBP_HINT_GRAPH, // Discrete tone image (graph, map-tile etc). + WEBP_HINT_LAST +} WebPImageHint; + +// Compression parameters. +struct WebPConfig { + int lossless; // Lossless encoding (0=lossy(default), 1=lossless). 
+ float quality; // between 0 (smallest file) and 100 (biggest) + int method; // quality/speed trade-off (0=fast, 6=slower-better) + + WebPImageHint image_hint; // Hint for image type (lossless only for now). + + // Parameters related to lossy compression only: + int target_size; // if non-zero, set the desired target size in bytes. + // Takes precedence over the 'compression' parameter. + float target_PSNR; // if non-zero, specifies the minimal distortion to + // try to achieve. Takes precedence over target_size. + int segments; // maximum number of segments to use, in [1..4] + int sns_strength; // Spatial Noise Shaping. 0=off, 100=maximum. + int filter_strength; // range: [0 = off .. 100 = strongest] + int filter_sharpness; // range: [0 = off .. 7 = least sharp] + int filter_type; // filtering type: 0 = simple, 1 = strong (only used + // if filter_strength > 0 or autofilter > 0) + int autofilter; // Auto adjust filter's strength [0 = off, 1 = on] + int alpha_compression; // Algorithm for encoding the alpha plane (0 = none, + // 1 = compressed with WebP lossless). Default is 1. + int alpha_filtering; // Predictive filtering method for alpha plane. + // 0: none, 1: fast, 2: best. Default if 1. + int alpha_quality; // Between 0 (smallest size) and 100 (lossless). + // Default is 100. + int pass; // number of entropy-analysis passes (in [1..10]). + + int show_compressed; // if true, export the compressed picture back. + // In-loop filtering is not applied. + int preprocessing; // preprocessing filter: + // 0=none, 1=segment-smooth, 2=pseudo-random dithering + int partitions; // log2(number of token partitions) in [0..3]. Default + // is set to 0 for easier progressive decoding. + int partition_limit; // quality degradation allowed to fit the 512k limit + // on prediction modes coding (0: no degradation, + // 100: maximum possible degradation). 
+ int emulate_jpeg_size; // If true, compression parameters will be remapped + // to better match the expected output size from + // JPEG compression. Generally, the output size will + // be similar but the degradation will be lower. + int thread_level; // If non-zero, try and use multi-threaded encoding. + int low_memory; // If set, reduce memory usage (but increase CPU use). + + int near_lossless; // Near lossless encoding [0 = max loss .. 100 = off + // (default)]. + int exact; // if non-zero, preserve the exact RGB values under + // transparent area. Otherwise, discard this invisible + // RGB information for better compression. The default + // value is 0. + +#ifdef WEBP_EXPERIMENTAL_FEATURES + int delta_palettization; + uint32_t pad[2]; // padding for later use +#else + uint32_t pad[3]; // padding for later use +#endif // WEBP_EXPERIMENTAL_FEATURES +}; + +// Enumerate some predefined settings for WebPConfig, depending on the type +// of source picture. These presets are used when calling WebPConfigPreset(). +typedef enum WebPPreset { + WEBP_PRESET_DEFAULT = 0, // default preset. + WEBP_PRESET_PICTURE, // digital picture, like portrait, inner shot + WEBP_PRESET_PHOTO, // outdoor photograph, with natural lighting + WEBP_PRESET_DRAWING, // hand or line drawing, with high-contrast details + WEBP_PRESET_ICON, // small-sized colorful images + WEBP_PRESET_TEXT // text-like +} WebPPreset; + +// Internal, version-checked, entry point +WEBP_EXTERN(int) WebPConfigInitInternal(WebPConfig*, WebPPreset, float, int); + +// Should always be called, to initialize a fresh WebPConfig structure before +// modification. Returns false in case of version mismatch. WebPConfigInit() +// must have succeeded before using the 'config' object. +// Note that the default values are lossless=0 and quality=75. 
+static WEBP_INLINE int WebPConfigInit(WebPConfig* config) { + return WebPConfigInitInternal(config, WEBP_PRESET_DEFAULT, 75.f, + WEBP_ENCODER_ABI_VERSION); +} + +// This function will initialize the configuration according to a predefined +// set of parameters (referred to by 'preset') and a given quality factor. +// This function can be called as a replacement to WebPConfigInit(). Will +// return false in case of error. +static WEBP_INLINE int WebPConfigPreset(WebPConfig* config, + WebPPreset preset, float quality) { + return WebPConfigInitInternal(config, preset, quality, + WEBP_ENCODER_ABI_VERSION); +} + +// Activate the lossless compression mode with the desired efficiency level +// between 0 (fastest, lowest compression) and 9 (slower, best compression). +// A good default level is '6', providing a fair tradeoff between compression +// speed and final compressed size. +// This function will overwrite several fields from config: 'method', 'quality' +// and 'lossless'. Returns false in case of parameter error. +WEBP_EXTERN(int) WebPConfigLosslessPreset(WebPConfig* config, int level); + +// Returns true if 'config' is non-NULL and all configuration parameters are +// within their valid ranges. +WEBP_EXTERN(int) WebPValidateConfig(const WebPConfig* config); + +//------------------------------------------------------------------------------ +// Input / Output +// Structure for storing auxiliary statistics (mostly for lossy encoding). + +struct WebPAuxStats { + int coded_size; // final size + + float PSNR[5]; // peak-signal-to-noise ratio for Y/U/V/All/Alpha + int block_count[3]; // number of intra4/intra16/skipped macroblocks + int header_bytes[2]; // approximate number of bytes spent for header + // and mode-partition #0 + int residual_bytes[3][4]; // approximate number of bytes spent for + // DC/AC/uv coefficients for each (0..3) segments. 
+ int segment_size[4]; // number of macroblocks in each segments + int segment_quant[4]; // quantizer values for each segments + int segment_level[4]; // filtering strength for each segments [0..63] + + int alpha_data_size; // size of the transparency data + int layer_data_size; // size of the enhancement layer data + + // lossless encoder statistics + uint32_t lossless_features; // bit0:predictor bit1:cross-color transform + // bit2:subtract-green bit3:color indexing + int histogram_bits; // number of precision bits of histogram + int transform_bits; // precision bits for transform + int cache_bits; // number of bits for color cache lookup + int palette_size; // number of color in palette, if used + int lossless_size; // final lossless size + int lossless_hdr_size; // lossless header (transform, huffman etc) size + int lossless_data_size; // lossless image data size + + uint32_t pad[2]; // padding for later use +}; + +// Signature for output function. Should return true if writing was successful. +// data/data_size is the segment of data to write, and 'picture' is for +// reference (and so one can make use of picture->custom_ptr). +typedef int (*WebPWriterFunction)(const uint8_t* data, size_t data_size, + const WebPPicture* picture); + +// WebPMemoryWrite: a special WebPWriterFunction that writes to memory using +// the following WebPMemoryWriter object (to be set as a custom_ptr). +struct WebPMemoryWriter { + uint8_t* mem; // final buffer (of size 'max_size', larger than 'size'). + size_t size; // final size + size_t max_size; // total capacity + uint32_t pad[1]; // padding for later use +}; + +// The following must be called first before any use. +WEBP_EXTERN(void) WebPMemoryWriterInit(WebPMemoryWriter* writer); + +// The following must be called to deallocate writer->mem memory. The 'writer' +// object itself is not deallocated. 
+WEBP_EXTERN(void) WebPMemoryWriterClear(WebPMemoryWriter* writer); +// The custom writer to be used with WebPMemoryWriter as custom_ptr. Upon +// completion, writer.mem and writer.size will hold the coded data. +// writer.mem must be freed by calling WebPMemoryWriterClear. +WEBP_EXTERN(int) WebPMemoryWrite(const uint8_t* data, size_t data_size, + const WebPPicture* picture); + +// Progress hook, called from time to time to report progress. It can return +// false to request an abort of the encoding process, or true otherwise if +// everything is OK. +typedef int (*WebPProgressHook)(int percent, const WebPPicture* picture); + +// Color spaces. +typedef enum WebPEncCSP { + // chroma sampling + WEBP_YUV420 = 0, // 4:2:0 + WEBP_YUV420A = 4, // alpha channel variant + WEBP_CSP_UV_MASK = 3, // bit-mask to get the UV sampling factors + WEBP_CSP_ALPHA_BIT = 4 // bit that is set if alpha is present +} WebPEncCSP; + +// Encoding error conditions. +typedef enum WebPEncodingError { + VP8_ENC_OK = 0, + VP8_ENC_ERROR_OUT_OF_MEMORY, // memory error allocating objects + VP8_ENC_ERROR_BITSTREAM_OUT_OF_MEMORY, // memory error while flushing bits + VP8_ENC_ERROR_NULL_PARAMETER, // a pointer parameter is NULL + VP8_ENC_ERROR_INVALID_CONFIGURATION, // configuration is invalid + VP8_ENC_ERROR_BAD_DIMENSION, // picture has invalid width/height + VP8_ENC_ERROR_PARTITION0_OVERFLOW, // partition is bigger than 512k + VP8_ENC_ERROR_PARTITION_OVERFLOW, // partition is bigger than 16M + VP8_ENC_ERROR_BAD_WRITE, // error while flushing bytes + VP8_ENC_ERROR_FILE_TOO_BIG, // file is bigger than 4G + VP8_ENC_ERROR_USER_ABORT, // abort request by user + VP8_ENC_ERROR_LAST // list terminator. always last. 
+} WebPEncodingError; + +// maximum width/height allowed (inclusive), in pixels +#define WEBP_MAX_DIMENSION 16383 + +// Main exchange structure (input samples, output bytes, statistics) +struct WebPPicture { + // INPUT + ////////////// + // Main flag for encoder selecting between ARGB or YUV input. + // It is recommended to use ARGB input (*argb, argb_stride) for lossless + // compression, and YUV input (*y, *u, *v, etc.) for lossy compression + // since these are the respective native colorspace for these formats. + int use_argb; + + // YUV input (mostly used for input to lossy compression) + WebPEncCSP colorspace; // colorspace: should be YUV420 for now (=Y'CbCr). + int width, height; // dimensions (less or equal to WEBP_MAX_DIMENSION) + uint8_t *y, *u, *v; // pointers to luma/chroma planes. + int y_stride, uv_stride; // luma/chroma strides. + uint8_t* a; // pointer to the alpha plane + int a_stride; // stride of the alpha plane + uint32_t pad1[2]; // padding for later use + + // ARGB input (mostly used for input to lossless compression) + uint32_t* argb; // Pointer to argb (32 bit) plane. + int argb_stride; // This is stride in pixels units, not bytes. + uint32_t pad2[3]; // padding for later use + + // OUTPUT + /////////////// + // Byte-emission hook, to store compressed bytes as they are ready. + WebPWriterFunction writer; // can be NULL + void* custom_ptr; // can be used by the writer. + + // map for extra information (only for lossy compression mode) + int extra_info_type; // 1: intra type, 2: segment, 3: quant + // 4: intra-16 prediction mode, + // 5: chroma prediction mode, + // 6: bit cost, 7: distortion + uint8_t* extra_info; // if not NULL, points to an array of size + // ((width + 15) / 16) * ((height + 15) / 16) that + // will be filled with a macroblock map, depending + // on extra_info_type. 
+ + // STATS AND REPORTS + /////////////////////////// + // Pointer to side statistics (updated only if not NULL) + WebPAuxStats* stats; + + // Error code for the latest error encountered during encoding + WebPEncodingError error_code; + + // If not NULL, report progress during encoding. + WebPProgressHook progress_hook; + + void* user_data; // this field is free to be set to any value and + // used during callbacks (like progress-report e.g.). + + uint32_t pad3[3]; // padding for later use + + // Unused for now + uint8_t *pad4, *pad5; + uint32_t pad6[8]; // padding for later use + + // PRIVATE FIELDS + //////////////////// + void* memory_; // row chunk of memory for yuva planes + void* memory_argb_; // and for argb too. + void* pad7[2]; // padding for later use +}; + +// Internal, version-checked, entry point +WEBP_EXTERN(int) WebPPictureInitInternal(WebPPicture*, int); + +// Should always be called, to initialize the structure. Returns false in case +// of version mismatch. WebPPictureInit() must have succeeded before using the +// 'picture' object. +// Note that, by default, use_argb is false and colorspace is WEBP_YUV420. +static WEBP_INLINE int WebPPictureInit(WebPPicture* picture) { + return WebPPictureInitInternal(picture, WEBP_ENCODER_ABI_VERSION); +} + +//------------------------------------------------------------------------------ +// WebPPicture utils + +// Convenience allocation / deallocation based on picture->width/height: +// Allocate y/u/v buffers as per colorspace/width/height specification. +// Note! This function will free the previous buffer if needed. +// Returns false in case of memory error. +WEBP_EXTERN(int) WebPPictureAlloc(WebPPicture* picture); + +// Release the memory allocated by WebPPictureAlloc() or WebPPictureImport*(). +// Note that this function does _not_ free the memory used by the 'picture' +// object itself. +// Besides memory (which is reclaimed) all other fields of 'picture' are +// preserved. 
+WEBP_EXTERN(void) WebPPictureFree(WebPPicture* picture); + +// Copy the pixels of *src into *dst, using WebPPictureAlloc. Upon return, *dst +// will fully own the copied pixels (this is not a view). The 'dst' picture need +// not be initialized as its content is overwritten. +// Returns false in case of memory allocation error. +WEBP_EXTERN(int) WebPPictureCopy(const WebPPicture* src, WebPPicture* dst); + +// Compute PSNR, SSIM or LSIM distortion metric between two pictures. Results +// are in dB, stored in result[] in the Y/U/V/Alpha/All or B/G/R/A/All order. +// Returns false in case of error (src and ref don't have same dimension, ...) +// Warning: this function is rather CPU-intensive. +WEBP_EXTERN(int) WebPPictureDistortion( + const WebPPicture* src, const WebPPicture* ref, + int metric_type, // 0 = PSNR, 1 = SSIM, 2 = LSIM + float result[5]); + +// self-crops a picture to the rectangle defined by top/left/width/height. +// Returns false in case of memory allocation error, or if the rectangle is +// outside of the source picture. +// The rectangle for the view is defined by the top-left corner pixel +// coordinates (left, top) as well as its width and height. This rectangle +// must be fully be comprised inside the 'src' source picture. If the source +// picture uses the YUV420 colorspace, the top and left coordinates will be +// snapped to even values. +WEBP_EXTERN(int) WebPPictureCrop(WebPPicture* picture, + int left, int top, int width, int height); + +// Extracts a view from 'src' picture into 'dst'. The rectangle for the view +// is defined by the top-left corner pixel coordinates (left, top) as well +// as its width and height. This rectangle must be fully be comprised inside +// the 'src' source picture. If the source picture uses the YUV420 colorspace, +// the top and left coordinates will be snapped to even values. +// Picture 'src' must out-live 'dst' picture. 
Self-extraction of view is allowed +// ('src' equal to 'dst') as a mean of fast-cropping (but note that doing so, +// the original dimension will be lost). Picture 'dst' need not be initialized +// with WebPPictureInit() if it is different from 'src', since its content will +// be overwritten. +// Returns false in case of memory allocation error or invalid parameters. +WEBP_EXTERN(int) WebPPictureView(const WebPPicture* src, + int left, int top, int width, int height, + WebPPicture* dst); + +// Returns true if the 'picture' is actually a view and therefore does +// not own the memory for pixels. +WEBP_EXTERN(int) WebPPictureIsView(const WebPPicture* picture); + +// Rescale a picture to new dimension width x height. +// If either 'width' or 'height' (but not both) is 0 the corresponding +// dimension will be calculated preserving the aspect ratio. +// No gamma correction is applied. +// Returns false in case of error (invalid parameter or insufficient memory). +WEBP_EXTERN(int) WebPPictureRescale(WebPPicture* pic, int width, int height); + +// Colorspace conversion function to import RGB samples. +// Previous buffer will be free'd, if any. +// *rgb buffer should have a size of at least height * rgb_stride. +// Returns false in case of memory error. +WEBP_EXTERN(int) WebPPictureImportRGB( + WebPPicture* picture, const uint8_t* rgb, int rgb_stride); +// Same, but for RGBA buffer. +WEBP_EXTERN(int) WebPPictureImportRGBA( + WebPPicture* picture, const uint8_t* rgba, int rgba_stride); +// Same, but for RGBA buffer. Imports the RGB direct from the 32-bit format +// input buffer ignoring the alpha channel. Avoids needing to copy the data +// to a temporary 24-bit RGB buffer to import the RGB only. +WEBP_EXTERN(int) WebPPictureImportRGBX( + WebPPicture* picture, const uint8_t* rgbx, int rgbx_stride); + +// Variants of the above, but taking BGR(A|X) input. 
+WEBP_EXTERN(int) WebPPictureImportBGR( + WebPPicture* picture, const uint8_t* bgr, int bgr_stride); +WEBP_EXTERN(int) WebPPictureImportBGRA( + WebPPicture* picture, const uint8_t* bgra, int bgra_stride); +WEBP_EXTERN(int) WebPPictureImportBGRX( + WebPPicture* picture, const uint8_t* bgrx, int bgrx_stride); + +// Converts picture->argb data to the YUV420A format. The 'colorspace' +// parameter is deprecated and should be equal to WEBP_YUV420. +// Upon return, picture->use_argb is set to false. The presence of real +// non-opaque transparent values is detected, and 'colorspace' will be +// adjusted accordingly. Note that this method is lossy. +// Returns false in case of error. +WEBP_EXTERN(int) WebPPictureARGBToYUVA(WebPPicture* picture, + WebPEncCSP /*colorspace = WEBP_YUV420*/); + +// Same as WebPPictureARGBToYUVA(), but the conversion is done using +// pseudo-random dithering with a strength 'dithering' between +// 0.0 (no dithering) and 1.0 (maximum dithering). This is useful +// for photographic picture. +WEBP_EXTERN(int) WebPPictureARGBToYUVADithered( + WebPPicture* picture, WebPEncCSP colorspace, float dithering); + +// Performs 'smart' RGBA->YUVA420 downsampling and colorspace conversion. +// Downsampling is handled with extra care in case of color clipping. This +// method is roughly 2x slower than WebPPictureARGBToYUVA() but produces better +// YUV representation. +// Returns false in case of error. +WEBP_EXTERN(int) WebPPictureSmartARGBToYUVA(WebPPicture* picture); + +// Converts picture->yuv to picture->argb and sets picture->use_argb to true. +// The input format must be YUV_420 or YUV_420A. +// Note that the use of this method is discouraged if one has access to the +// raw ARGB samples, since using YUV420 is comparatively lossy. Also, the +// conversion from YUV420 to ARGB incurs a small loss too. +// Returns false in case of error. 
+WEBP_EXTERN(int) WebPPictureYUVAToARGB(WebPPicture* picture); + +// Helper function: given a width x height plane of RGBA or YUV(A) samples +// clean-up the YUV or RGB samples under fully transparent area, to help +// compressibility (no guarantee, though). +WEBP_EXTERN(void) WebPCleanupTransparentArea(WebPPicture* picture); + +// Scan the picture 'picture' for the presence of non fully opaque alpha values. +// Returns true in such case. Otherwise returns false (indicating that the +// alpha plane can be ignored altogether e.g.). +WEBP_EXTERN(int) WebPPictureHasTransparency(const WebPPicture* picture); + +// Remove the transparency information (if present) by blending the color with +// the background color 'background_rgb' (specified as 24bit RGB triplet). +// After this call, all alpha values are reset to 0xff. +WEBP_EXTERN(void) WebPBlendAlpha(WebPPicture* pic, uint32_t background_rgb); + +//------------------------------------------------------------------------------ +// Main call + +// Main encoding call, after config and picture have been initialized. +// 'picture' must be less than 16384x16384 in dimension (cf WEBP_MAX_DIMENSION), +// and the 'config' object must be a valid one. +// Returns false in case of error, true otherwise. +// In case of error, picture->error_code is updated accordingly. +// 'picture' can hold the source samples in both YUV(A) or ARGB input, depending +// on the value of 'picture->use_argb'. It is highly recommended to use +// the former for lossy encoding, and the latter for lossless encoding +// (when config.lossless is true). Automatic conversion from one format to +// another is provided but they both incur some loss. 
+WEBP_EXTERN(int) WebPEncode(const WebPConfig* config, WebPPicture* picture); + +//------------------------------------------------------------------------------ + +#ifdef __cplusplus +} // extern "C" +#endif + +#endif /* WEBP_WEBP_ENCODE_H_ */ diff --git a/third-party/libwebp/include/webp/types.h b/third-party/libwebp/include/webp/types.h new file mode 100644 index 0000000000..98fff35a11 --- /dev/null +++ b/third-party/libwebp/include/webp/types.h @@ -0,0 +1,52 @@ +// Copyright 2010 Google Inc. All Rights Reserved. +// +// Use of this source code is governed by a BSD-style license +// that can be found in the COPYING file in the root of the source +// tree. An additional intellectual property rights grant can be found +// in the file PATENTS. All contributing project authors may +// be found in the AUTHORS file in the root of the source tree. +// ----------------------------------------------------------------------------- +// +// Common types +// +// Author: Skal (pascal.massimino@gmail.com) + +#ifndef WEBP_WEBP_TYPES_H_ +#define WEBP_WEBP_TYPES_H_ + +#include // for size_t + +#ifndef _MSC_VER +#include +#if defined(__cplusplus) || !defined(__STRICT_ANSI__) || \ + (defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L) +#define WEBP_INLINE inline +#else +#define WEBP_INLINE +#endif +#else +typedef signed char int8_t; +typedef unsigned char uint8_t; +typedef signed short int16_t; +typedef unsigned short uint16_t; +typedef signed int int32_t; +typedef unsigned int uint32_t; +typedef unsigned long long int uint64_t; +typedef long long int int64_t; +#define WEBP_INLINE __forceinline +#endif /* _MSC_VER */ + +#ifndef WEBP_EXTERN +// This explicitly marks library functions and allows for changing the +// signature for e.g., Windows DLL builds. 
+# if defined(__GNUC__) && __GNUC__ >= 4 +# define WEBP_EXTERN(type) extern __attribute__ ((visibility ("default"))) type +# else +# define WEBP_EXTERN(type) extern type +# endif /* __GNUC__ >= 4 */ +#endif /* WEBP_EXTERN */ + +// Macro to check ABI compatibility (same major revision number) +#define WEBP_ABI_IS_INCOMPATIBLE(a, b) (((a) >> 8) != ((b) >> 8)) + +#endif /* WEBP_WEBP_TYPES_H_ */ diff --git a/third-party/libwebp/lib/libwebp.a b/third-party/libwebp/lib/libwebp.a new file mode 100644 index 0000000000..f88dde2571 Binary files /dev/null and b/third-party/libwebp/lib/libwebp.a differ