Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios
Commit dec40ef830
@@ -796,6 +796,10 @@ public extension CALayer {
    static func luminanceToAlpha() -> NSObject? {
        return makeLuminanceToAlphaFilter()
    }

    static func colorInvert() -> NSObject? {
        return makeColorInvertFilter()
    }
}

public extension CALayer {
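The two factory methods above wrap private Core Animation filters: luminanceToAlpha turns a layer's rendered luminance into an alpha channel, and colorInvert flips RGB. Chained, they let an ordinary view hierarchy act as an alpha mask, which the ReactionContextBackgroundNode changes later in this commit rely on. A minimal sketch of the composition (the view names are placeholders, not part of this diff):

    // Sketch: use maskContainer's rendering as an alpha mask for tintView.
    // With colorInvert applied first, black content becomes opaque in the mask;
    // with luminanceToAlpha alone, white content becomes opaque.
    let maskContainer = UIView()
    let tintView = UIView()
    if let invert = CALayer.colorInvert(), let lumaToAlpha = CALayer.luminanceToAlpha() {
        maskContainer.layer.filters = [invert, lumaToAlpha]
    }
    tintView.mask = maskContainer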
@@ -24,10 +24,18 @@ typedef struct FFMpegStreamMetrics {
    int32_t extradataSize;
} FFMpegStreamMetrics;

typedef struct FFMpegAVIndexEntry {
    int64_t pos;
    int64_t timestamp;
    bool isKeyframe;
    int32_t size;
} FFMpegAVIndexEntry;

extern int FFMpegCodecIdH264;
extern int FFMpegCodecIdHEVC;
extern int FFMpegCodecIdMPEG4;
extern int FFMpegCodecIdVP9;
extern int FFMpegCodecIdVP8;
extern int FFMpegCodecIdAV1;

@class FFMpegAVCodecContext;

@@ -40,6 +48,7 @@ extern int FFMpegCodecIdAV1;
- (bool)openInputWithDirectFilePath:(NSString * _Nullable)directFilePath;
- (bool)findStreamInfo;
- (void)seekFrameForStreamIndex:(int32_t)streamIndex pts:(int64_t)pts positionOnKeyframe:(bool)positionOnKeyframe;
- (void)seekFrameForStreamIndex:(int32_t)streamIndex byteOffset:(int64_t)byteOffset;
- (bool)readFrameIntoPacket:(FFMpegPacket *)packet;
- (NSArray<NSNumber *> *)streamIndicesForType:(FFMpegAVFormatStreamType)type;
- (bool)isAttachedPicAtStreamIndex:(int32_t)streamIndex;

@@ -47,6 +56,8 @@ extern int FFMpegCodecIdAV1;
- (double)duration;
- (int64_t)startTimeAtStreamIndex:(int32_t)streamIndex;
- (int64_t)durationAtStreamIndex:(int32_t)streamIndex;
- (int)numberOfIndexEntriesAtStreamIndex:(int32_t)streamIndex;
- (bool)fillIndexEntryAtStreamIndex:(int32_t)streamIndex entryIndex:(int32_t)entryIndex outEntry:(FFMpegAVIndexEntry * _Nonnull)outEntry;
- (bool)codecParamsAtStreamIndex:(int32_t)streamIndex toContext:(FFMpegAVCodecContext *)context;
- (FFMpegFpsAndTimebase)fpsAndTimebaseForStreamIndex:(int32_t)streamIndex defaultTimeBase:(CMTime)defaultTimeBase;
- (FFMpegStreamMetrics)metricsForStreamAtIndex:(int32_t)streamIndex;
@@ -12,6 +12,7 @@ NS_ASSUME_NONNULL_BEGIN
@property (nonatomic, readonly) int32_t streamIndex;
@property (nonatomic, readonly) int32_t size;
@property (nonatomic, readonly) uint8_t *data;
@property (nonatomic, readonly) bool isKeyframe;

- (void *)impl;
- (int32_t)sendToDecoder:(FFMpegAVCodecContext *)codecContext;
@@ -11,6 +11,7 @@ int FFMpegCodecIdH264 = AV_CODEC_ID_H264;
int FFMpegCodecIdHEVC = AV_CODEC_ID_HEVC;
int FFMpegCodecIdMPEG4 = AV_CODEC_ID_MPEG4;
int FFMpegCodecIdVP9 = AV_CODEC_ID_VP9;
int FFMpegCodecIdVP8 = AV_CODEC_ID_VP8;
int FFMpegCodecIdAV1 = AV_CODEC_ID_AV1;

@interface FFMpegAVFormatContext () {

@@ -70,6 +71,11 @@ int FFMpegCodecIdAV1 = AV_CODEC_ID_AV1;
    av_seek_frame(_impl, streamIndex, pts, options);
}

- (void)seekFrameForStreamIndex:(int32_t)streamIndex byteOffset:(int64_t)byteOffset {
    int options = AVSEEK_FLAG_BYTE;
    av_seek_frame(_impl, streamIndex, byteOffset, options);
}

- (bool)readFrameIntoPacket:(FFMpegPacket *)packet {
    int result = av_read_frame(_impl, (AVPacket *)[packet impl]);
    return result >= 0;

@@ -117,6 +123,28 @@ int FFMpegCodecIdAV1 = AV_CODEC_ID_AV1;
    return _impl->streams[streamIndex]->duration;
}

- (int)numberOfIndexEntriesAtStreamIndex:(int32_t)streamIndex {
    return avformat_index_get_entries_count(_impl->streams[streamIndex]);
}

- (bool)fillIndexEntryAtStreamIndex:(int32_t)streamIndex entryIndex:(int32_t)entryIndex outEntry:(FFMpegAVIndexEntry * _Nonnull)outEntry {
    const AVIndexEntry *entry = avformat_index_get_entry(_impl->streams[streamIndex], entryIndex);
    if (!entry) {
        outEntry->pos = -1;
        outEntry->timestamp = 0;
        outEntry->isKeyframe = false;
        outEntry->size = 0;
        return false;
    }

    outEntry->pos = entry->pos;
    outEntry->timestamp = entry->timestamp;
    outEntry->isKeyframe = (entry->flags & AVINDEX_KEYFRAME) != 0;
    outEntry->size = entry->size;

    return true;
}

- (bool)codecParamsAtStreamIndex:(int32_t)streamIndex toContext:(FFMpegAVCodecContext *)context {
    int result = avcodec_parameters_to_context((AVCodecContext *)[context impl], _impl->streams[streamIndex]->codecpar);
    return result >= 0;
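The byte-offset seek above passes AVSEEK_FLAG_BYTE, so the demuxer repositions to a file offset instead of a timestamp; paired with the index-entry API it allows jumping straight to a segment's first byte. A hedged Swift sketch (the segment bounds are placeholders):

    // Sketch: position at a segment's byte range and read packets until its end pts.
    func readSegment(_ formatContext: FFMpegAVFormatContext, streamIndex: Int32, startPosition: Int64, endPts: Int64) {
        formatContext.seekFrame(forStreamIndex: streamIndex, byteOffset: startPosition)
        let packet = FFMpegPacket()
        while formatContext.readFrame(into: packet) {
            if packet.streamIndex == streamIndex && packet.pts >= endPts {
                break
            }
            // ...decode or copy the packet here...
        }
    }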
@@ -53,6 +53,10 @@
    return (int32_t)_impl->size;
}

- (bool)isKeyframe {
    return (_impl->flags & AV_PKT_FLAG_KEY) != 0;
}

- (uint8_t *)data {
    return _impl->data;
}
@@ -10,7 +10,7 @@ public func convertOpusToAAC(sourcePath: String, allocateTempFile: @escaping ()

    queue.async {
        do {
            let audioSource = SoftwareAudioSource(path: sourcePath)
            let audioSource = SoftwareAudioSource(path: sourcePath, focusedPart: nil)

            let outputPath = allocateTempFile()

File diff suppressed because it is too large
@@ -0,0 +1,486 @@
import Foundation
import UIKit
import SwiftSignalKit
import Postbox
import TelegramCore
import FFMpegBinding
import RangeSet

private final class FFMpegMediaFrameExtractContext {
    let fd: Int32
    var readPosition: Int = 0
    let size: Int

    var accessedRanges = RangeSet<Int>()
    var maskRanges: RangeSet<Int>?
    var recordAccessedRanges = false

    init(fd: Int32, size: Int) {
        self.fd = fd
        self.size = size
    }
}

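// The callbacks below back a custom FFMpegAVIOContext: FFmpeg pulls bytes via
// read(2) on the file descriptor and repositions via lseek(2). While
// recordAccessedRanges is set, every byte range FFmpeg touches is accumulated
// into accessedRanges; this is how the header/index access pattern is measured.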
private func FFMpegMediaFrameExtractContextReadPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
    let context = Unmanaged<FFMpegMediaFrameExtractContext>.fromOpaque(userData!).takeUnretainedValue()
    if context.recordAccessedRanges {
        context.accessedRanges.insert(contentsOf: context.readPosition ..< (context.readPosition + Int(bufferSize)))
    }

    let result: Int
    if let maskRanges = context.maskRanges {
        let readRange = context.readPosition ..< (context.readPosition + Int(bufferSize))
        let _ = maskRanges
        let _ = readRange
        result = read(context.fd, buffer, Int(bufferSize))
    } else {
        result = read(context.fd, buffer, Int(bufferSize))
    }
    context.readPosition += Int(bufferSize)
    if result == 0 {
        return FFMPEG_CONSTANT_AVERROR_EOF
    }
    return Int32(result)
}

private func FFMpegMediaFrameExtractContextSeekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
    let context = Unmanaged<FFMpegMediaFrameExtractContext>.fromOpaque(userData!).takeUnretainedValue()
    if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
        return Int64(context.size)
    } else {
        context.readPosition = Int(offset)
        lseek(context.fd, off_t(offset), SEEK_SET)
        return offset
    }
}

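// A segment is a self-contained slice of the file: per stream it tracks the
// pts window [startPts, endPts] and the byte window [startPosition, endPosition)
// that must be available to play that slice.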
private struct FFMpegFrameSegment {
    struct Stream {
        let index: Int
        let startPts: CMTime
        let startPosition: Int64
        var endPts: CMTime
        var endPosition: Int64
        var duration: Double
    }

    var audio: Stream?
    var video: Stream?

    init() {
    }

    mutating func addFrame(isVideo: Bool, index: Int, pts: CMTime, duration: Double, position: Int64, size: Int64) {
        if var stream = isVideo ? self.video : self.audio {
            stream.endPts = pts
            stream.duration += duration
            stream.endPosition = max(stream.endPosition, position + size)
            if isVideo {
                self.video = stream
            } else {
                self.audio = stream
            }
        } else {
            let stream = Stream(index: index, startPts: pts, startPosition: position, endPts: pts, endPosition: position + size, duration: duration)
            if isVideo {
                self.video = stream
            } else {
                self.audio = stream
            }
        }
    }
}

private final class FFMpegFrameSegmentInfo {
    let headerAccessRanges: RangeSet<Int>
    let segments: [FFMpegFrameSegment]

    init(headerAccessRanges: RangeSet<Int>, segments: [FFMpegFrameSegment]) {
        self.headerAccessRanges = headerAccessRanges
        self.segments = segments
    }
}

private func extractFFMpegFrameSegmentInfo(path: String) -> FFMpegFrameSegmentInfo? {
    let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals

    var s = stat()
    stat(path, &s)
    let size = Int32(s.st_size)

    let fd = open(path, O_RDONLY, S_IRUSR)
    if fd < 0 {
        return nil
    }
    defer {
        close(fd)
    }

    let avFormatContext = FFMpegAVFormatContext()
    let ioBufferSize = 32 * 1024

    let context = FFMpegMediaFrameExtractContext(fd: fd, size: Int(size))
    context.recordAccessedRanges = true

    guard let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(context).toOpaque(), readPacket: FFMpegMediaFrameExtractContextReadPacketCallback, writePacket: nil, seek: FFMpegMediaFrameExtractContextSeekCallback, isSeekable: true) else {
        return nil
    }

    avFormatContext.setIO(avIoContext)

    if !avFormatContext.openInput(withDirectFilePath: nil) {
        return nil
    }

    if !avFormatContext.findStreamInfo() {
        return nil
    }

    var audioStream: FFMpegMediaInfo.Info?
    var videoStream: FFMpegMediaInfo.Info?

    for typeIndex in 0 ..< 2 {
        let isVideo = typeIndex == 0

        for streamIndexNumber in avFormatContext.streamIndices(for: isVideo ? FFMpegAVFormatStreamTypeVideo : FFMpegAVFormatStreamTypeAudio) {
            let streamIndex = streamIndexNumber.int32Value
            if avFormatContext.isAttachedPic(atStreamIndex: streamIndex) {
                continue
            }

            let fpsAndTimebase = avFormatContext.fpsAndTimebase(forStreamIndex: streamIndex, defaultTimeBase: CMTimeMake(value: 1, timescale: 40000))
            let (fps, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)

            let startTime: CMTime
            let rawStartTime = avFormatContext.startTime(atStreamIndex: streamIndex)
            if rawStartTime == Int64(bitPattern: 0x8000000000000000 as UInt64) {
                startTime = CMTime(value: 0, timescale: timebase.timescale)
            } else {
                startTime = CMTimeMake(value: rawStartTime, timescale: timebase.timescale)
            }
            var duration = CMTimeMake(value: avFormatContext.duration(atStreamIndex: streamIndex), timescale: timebase.timescale)
            duration = CMTimeMaximum(CMTime(value: 0, timescale: duration.timescale), CMTimeSubtract(duration, startTime))

            var codecName: String?
            let codecId = avFormatContext.codecId(atStreamIndex: streamIndex)
            if codecId == FFMpegCodecIdMPEG4 {
                codecName = "mpeg4"
            } else if codecId == FFMpegCodecIdH264 {
                codecName = "h264"
            } else if codecId == FFMpegCodecIdHEVC {
                codecName = "hevc"
            } else if codecId == FFMpegCodecIdAV1 {
                codecName = "av1"
            } else if codecId == FFMpegCodecIdVP9 {
                codecName = "vp9"
            } else if codecId == FFMpegCodecIdVP8 {
                codecName = "vp8"
            }

            let info = FFMpegMediaInfo.Info(
                index: Int(streamIndex),
                timescale: timebase.timescale,
                startTime: startTime,
                duration: duration,
                fps: fps,
                codecName: codecName
            )

            if isVideo {
                videoStream = info
            } else {
                audioStream = info
            }
        }
    }

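    // Build playback segments purely from the container's seek index: no packets
    // are demuxed here. A new segment starts on a keyframe once the current one
    // spans more than maxSegmentDuration seconds of video.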
    var segments: [FFMpegFrameSegment] = []
    let maxSegmentDuration: Double = 5.0

    if let videoStream {
        let indexEntryCount = avFormatContext.numberOfIndexEntries(atStreamIndex: Int32(videoStream.index))

        if indexEntryCount > 0 {
            let frameDuration = 1.0 / videoStream.fps.seconds

            var indexEntry = FFMpegAVIndexEntry()
            for i in 0 ..< indexEntryCount {
                if !avFormatContext.fillIndexEntry(atStreamIndex: Int32(videoStream.index), entryIndex: Int32(i), outEntry: &indexEntry) {
                    continue
                }

                let packetPts = CMTime(value: indexEntry.timestamp, timescale: videoStream.timescale)
                //print("index: \(packetPts.seconds), isKeyframe: \(indexEntry.isKeyframe), position: \(indexEntry.pos), size: \(indexEntry.size)")

                var startNewSegment = segments.isEmpty
                if indexEntry.isKeyframe {
                    if segments.isEmpty {
                        startNewSegment = true
                    } else if let video = segments[segments.count - 1].video {
                        if packetPts.seconds - video.startPts.seconds > maxSegmentDuration {
                            startNewSegment = true
                        }
                    }
                }

                if startNewSegment {
                    segments.append(FFMpegFrameSegment())
                }
                segments[segments.count - 1].addFrame(isVideo: true, index: videoStream.index, pts: packetPts, duration: frameDuration, position: indexEntry.pos, size: Int64(indexEntry.size))
            }
            if !segments.isEmpty, let video = segments[segments.count - 1].video {
                if video.endPts.seconds + 1.0 / videoStream.fps.seconds + 0.001 < videoStream.duration.seconds {
                    segments[segments.count - 1].video?.duration = videoStream.duration.seconds - video.startPts.seconds
                    segments[segments.count - 1].video?.endPts = videoStream.duration
                }
            }
        }
    }
    if let audioStream {
        let indexEntryCount = avFormatContext.numberOfIndexEntries(atStreamIndex: Int32(audioStream.index))
        if indexEntryCount > 0 {
            var minSegmentIndex = 0
            var minSegmentStartTime: Double = -100000.0

            let frameDuration = 1.0 / audioStream.fps.seconds

            var indexEntry = FFMpegAVIndexEntry()
            for i in 0 ..< indexEntryCount {
                if !avFormatContext.fillIndexEntry(atStreamIndex: Int32(audioStream.index), entryIndex: Int32(i), outEntry: &indexEntry) {
                    continue
                }

                let packetPts = CMTime(value: indexEntry.timestamp, timescale: audioStream.timescale)
                //print("index: \(packetPts.value), timestamp: \(packetPts.seconds), isKeyframe: \(indexEntry.isKeyframe), position: \(indexEntry.pos), size: \(indexEntry.size)")

                if videoStream != nil {
                    for i in minSegmentIndex ..< segments.count {
                        if let video = segments[i].video {
                            if minSegmentStartTime <= packetPts.seconds && video.endPts.seconds >= packetPts.seconds {
                                segments[i].addFrame(isVideo: false, index: audioStream.index, pts: packetPts, duration: frameDuration, position: indexEntry.pos, size: Int64(indexEntry.size))
                                if minSegmentIndex != i {
                                    minSegmentIndex = i
                                    minSegmentStartTime = video.startPts.seconds
                                }
                                break
                            }
                        }
                    }
                } else {
                    if segments.isEmpty {
                        segments.append(FFMpegFrameSegment())
                    }
                    segments[segments.count - 1].addFrame(isVideo: false, index: audioStream.index, pts: packetPts, duration: frameDuration, position: indexEntry.pos, size: Int64(indexEntry.size))
                }
            }
        }
        if !segments.isEmpty, let audio = segments[segments.count - 1].audio {
            if audio.endPts.seconds + 0.001 < audioStream.duration.seconds {
                segments[segments.count - 1].audio?.duration = audioStream.duration.seconds - audio.startPts.seconds
                segments[segments.count - 1].audio?.endPts = audioStream.duration
            }
        }
    }

    let headerAccessRanges = context.accessedRanges

    for i in 1 ..< segments.count {
        let segment = segments[i]

        if let video = segment.video {
            context.maskRanges = headerAccessRanges
            context.maskRanges?.insert(contentsOf: Int(video.startPosition) ..< Int(video.endPosition))

            context.accessedRanges = RangeSet()
            context.recordAccessedRanges = true

            avFormatContext.seekFrame(forStreamIndex: Int32(video.index), byteOffset: video.startPosition)

            let packet = FFMpegPacket()
            while true {
                if !avFormatContext.readFrame(into: packet) {
                    break
                }

                if Int(packet.streamIndex) == video.index {
                    let packetPts = CMTime(value: packet.pts, timescale: video.startPts.timescale)
                    if packetPts.value >= video.endPts.value {
                        break
                    }
                }
            }

            print("Segment \(i): \(video.startPosition) ..< \(video.endPosition) accessed \(context.accessedRanges.ranges)")
        }
    }

    /*{
        if let videoStream {
            avFormatContext.seekFrame(forStreamIndex: Int32(videoStream.index), pts: 0, positionOnKeyframe: true)

            let packet = FFMpegPacket()
            while true {
                if !avFormatContext.readFrame(into: packet) {
                    break
                }

                if Int(packet.streamIndex) == videoStream.index {
                    let packetPts = CMTime(value: packet.pts, timescale: videoStream.timescale)
                    let packetDuration = CMTime(value: packet.duration, timescale: videoStream.timescale)

                    var startNewSegment = segments.isEmpty
                    if packet.isKeyframe {
                        if segments.isEmpty {
                            startNewSegment = true
                        } else if let video = segments[segments.count - 1].video {
                            if packetPts.seconds - video.startPts.seconds > maxSegmentDuration {
                                startNewSegment = true
                            }
                        }
                    }

                    if startNewSegment {
                        segments.append(FFMpegFrameSegment())
                    }
                    segments[segments.count - 1].addFrame(isVideo: true, index: Int(packet.streamIndex), pts: packetPts, duration: packetDuration.seconds)
                }
            }
        }
        if let audioStream {
            avFormatContext.seekFrame(forStreamIndex: Int32(audioStream.index), pts: 0, positionOnKeyframe: true)

            var minSegmentIndex = 0

            let packet = FFMpegPacket()
            while true {
                if !avFormatContext.readFrame(into: packet) {
                    break
                }

                if Int(packet.streamIndex) == audioStream.index {
                    let packetPts = CMTime(value: packet.pts, timescale: audioStream.timescale)
                    let packetDuration = CMTime(value: packet.duration, timescale: audioStream.timescale)

                    if videoStream != nil {
                        for i in minSegmentIndex ..< segments.count {
                            if let video = segments[i].video {
                                if video.startPts.seconds <= packetPts.seconds && video.endPts.seconds >= packetPts.seconds {
                                    segments[i].addFrame(isVideo: false, index: Int(audioStream.index), pts: packetPts, duration: packetDuration.seconds)
                                    minSegmentIndex = i
                                    break
                                }
                            }
                        }
                    } else {
                        if segments.isEmpty {
                            segments.append(FFMpegFrameSegment())
                        }
                        segments[segments.count - 1].addFrame(isVideo: false, index: Int(packet.streamIndex), pts: packetPts, duration: packetDuration.seconds)
                    }
                }
            }
        }
    }*/

    /*for i in 0 ..< segments.count {
        print("Segment \(i):\n video \(segments[i].video?.startPts.seconds ?? -1.0) ... \(segments[i].video?.endPts.seconds ?? -1.0)\n audio \(segments[i].audio?.startPts.seconds ?? -1.0) ... \(segments[i].audio?.endPts.seconds ?? -1.0)")
    }*/

    return FFMpegFrameSegmentInfo(
        headerAccessRanges: context.accessedRanges,
        segments: segments
    )
}

final class ChunkMediaPlayerDirectFetchSourceImpl: ChunkMediaPlayerSourceImpl {
    private let resource: ChunkMediaPlayerV2.SourceDescription.ResourceDescription

    private let partsStateValue = Promise<ChunkMediaPlayerPartsState>()
    var partsState: Signal<ChunkMediaPlayerPartsState, NoError> {
        return self.partsStateValue.get()
    }

    private var completeFetchDisposable: Disposable?
    private var dataDisposable: Disposable?

    init(resource: ChunkMediaPlayerV2.SourceDescription.ResourceDescription) {
        self.resource = resource

        if resource.fetchAutomatically {
            self.completeFetchDisposable = fetchedMediaResource(
                mediaBox: resource.postbox.mediaBox,
                userLocation: resource.userLocation,
                userContentType: resource.userContentType,
                reference: resource.reference,
                statsCategory: resource.statsCategory,
                preferBackgroundReferenceRevalidation: true
            ).startStrict()
        }

        self.dataDisposable = (resource.postbox.mediaBox.resourceData(resource.reference.resource)
        |> deliverOnMainQueue).startStrict(next: { [weak self] data in
            guard let self else {
                return
            }
            if data.complete {
                if let mediaInfo = extractFFMpegMediaInfo(path: data.path), let mainTrack = mediaInfo.audio ?? mediaInfo.video, let segmentInfo = extractFFMpegFrameSegmentInfo(path: data.path) {
                    var parts: [ChunkMediaPlayerPart] = []
                    for segment in segmentInfo.segments {
                        guard let mainStream = segment.video ?? segment.audio else {
                            assertionFailure()
                            continue
                        }
                        parts.append(ChunkMediaPlayerPart(
                            startTime: mainStream.startPts.seconds,
                            endTime: mainStream.startPts.seconds + mainStream.duration,
                            content: .directFile(ChunkMediaPlayerPart.Content.FFMpegDirectFile(
                                path: data.path,
                                audio: segment.audio.flatMap { stream in
                                    return ChunkMediaPlayerPart.DirectStream(
                                        index: stream.index,
                                        startPts: stream.startPts,
                                        endPts: stream.endPts,
                                        duration: stream.duration
                                    )
                                },
                                video: segment.video.flatMap { stream in
                                    return ChunkMediaPlayerPart.DirectStream(
                                        index: stream.index,
                                        startPts: stream.startPts,
                                        endPts: stream.endPts,
                                        duration: stream.duration
                                    )
                                }
                            )),
                            codecName: mediaInfo.video?.codecName
                        ))
                    }

                    self.partsStateValue.set(.single(ChunkMediaPlayerPartsState(
                        duration: mainTrack.duration.seconds,
                        parts: parts
                    )))
                } else {
                    self.partsStateValue.set(.single(ChunkMediaPlayerPartsState(
                        duration: nil,
                        parts: []
                    )))
                }
            } else {
                self.partsStateValue.set(.single(ChunkMediaPlayerPartsState(
                    duration: nil,
                    parts: []
                )))
            }
        })
    }

    deinit {
        self.completeFetchDisposable?.dispose()
        self.dataDisposable?.dispose()
    }

    func updatePlaybackState(position: Double, isPlaying: Bool) {
    }
}
@@ -11,11 +11,51 @@ public let internal_isHardwareAv1Supported: Bool = {
    return value
}()

protocol ChunkMediaPlayerSourceImpl: AnyObject {
    var partsState: Signal<ChunkMediaPlayerPartsState, NoError> { get }

    func updatePlaybackState(position: Double, isPlaying: Bool)
}

private final class ChunkMediaPlayerExternalSourceImpl: ChunkMediaPlayerSourceImpl {
    let partsState: Signal<ChunkMediaPlayerPartsState, NoError>

    init(partsState: Signal<ChunkMediaPlayerPartsState, NoError>) {
        self.partsState = partsState
    }

    func updatePlaybackState(position: Double, isPlaying: Bool) {
    }
}

public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
    public enum SourceDescription {
        public final class ResourceDescription {
            public let postbox: Postbox
            public let reference: MediaResourceReference
            public let userLocation: MediaResourceUserLocation
            public let userContentType: MediaResourceUserContentType
            public let statsCategory: MediaResourceStatsCategory
            public let fetchAutomatically: Bool

            public init(postbox: Postbox, reference: MediaResourceReference, userLocation: MediaResourceUserLocation, userContentType: MediaResourceUserContentType, statsCategory: MediaResourceStatsCategory, fetchAutomatically: Bool) {
                self.postbox = postbox
                self.reference = reference
                self.userLocation = userLocation
                self.userContentType = userContentType
                self.statsCategory = statsCategory
                self.fetchAutomatically = fetchAutomatically
            }
        }

        case externalParts(Signal<ChunkMediaPlayerPartsState, NoError>)
        case directFetch(ResourceDescription)
    }

    private final class LoadedPart {
        final class Media {
            let queue: Queue
            let tempFile: TempBoxFile
            let content: ChunkMediaPlayerPart.Content
            let mediaType: AVMediaType
            let codecName: String?
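The new SourceDescription lets a caller either keep feeding parts externally or hand the player a resource to fetch and segment on its own. A hedged usage sketch; the concrete resource values and the assumed enum cases are placeholders, not part of this diff:

    // Sketch: constructing a direct-fetch source for ChunkMediaPlayerV2.
    func makeDirectFetchSource(postbox: Postbox, reference: MediaResourceReference, userLocation: MediaResourceUserLocation) -> ChunkMediaPlayerV2.SourceDescription {
        return .directFetch(ChunkMediaPlayerV2.SourceDescription.ResourceDescription(
            postbox: postbox,
            reference: reference,
            userLocation: userLocation,
            userContentType: .video,   // (assumed) an existing MediaResourceUserContentType case
            statsCategory: .video,     // (assumed) an existing MediaResourceStatsCategory case
            fetchAutomatically: true
        ))
    }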
@@ -24,11 +64,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
            var didBeginReading: Bool = false
            var isFinished: Bool = false

            init(queue: Queue, tempFile: TempBoxFile, mediaType: AVMediaType, codecName: String?) {
            init(queue: Queue, content: ChunkMediaPlayerPart.Content, mediaType: AVMediaType, codecName: String?) {
                assert(queue.isCurrent())

                self.queue = queue
                self.tempFile = tempFile
                self.content = content
                self.mediaType = mediaType
                self.codecName = codecName
            }
@@ -39,10 +79,10 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {

            func load() {
                let reader: MediaDataReader
                if self.mediaType == .video && (self.codecName == "av1" || self.codecName == "av01") && internal_isHardwareAv1Supported {
                    reader = AVAssetVideoDataReader(filePath: self.tempFile.path, isVideo: self.mediaType == .video)
                if case let .tempFile(tempFile) = self.content, self.mediaType == .video, (self.codecName == "av1" || self.codecName == "av01"), internal_isHardwareAv1Supported {
                    reader = AVAssetVideoDataReader(filePath: tempFile.file.path, isVideo: self.mediaType == .video)
                } else {
                    reader = FFMpegMediaDataReader(filePath: self.tempFile.path, isVideo: self.mediaType == .video, codecName: self.codecName)
                    reader = FFMpegMediaDataReader(content: self.content, isVideo: self.mediaType == .video, codecName: self.codecName)
                }
                if self.mediaType == .video {
                    if reader.hasVideo {

@@ -91,12 +131,10 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
    private let renderSynchronizer: AVSampleBufferRenderSynchronizer
    private var videoRenderer: AVSampleBufferDisplayLayer
    private var audioRenderer: AVSampleBufferAudioRenderer?
    private weak var videoNode: MediaPlayerNode?

    private var partsState = ChunkMediaPlayerPartsState(duration: nil, parts: [])
    private var loadedParts: [LoadedPart] = []
    private var loadedPartsMediaData: QueueLocalObject<LoadedPartsMediaData>
    private var reportedDidEnqueueVideo: Bool = false
    private var hasSound: Bool = false

    private var statusValue: MediaPlayerStatus? {
@@ -115,7 +153,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
        return .never()
    }

    public var actionAtEnd: ChunkMediaPlayerActionAtEnd = .stop
    public var actionAtEnd: MediaPlayerActionAtEnd = .stop

    private var isPlaying: Bool = false
    private var baseRate: Double = 1.0
@@ -132,6 +170,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
    private var videoIsRequestingMediaData: Bool = false
    private var audioIsRequestingMediaData: Bool = false

    private let source: ChunkMediaPlayerSourceImpl
    private var partsStateDisposable: Disposable?
    private var updateTimer: Foundation.Timer?
@@ -140,7 +179,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {

    public init(
        audioSessionManager: ManagedAudioSession,
        partsState: Signal<ChunkMediaPlayerPartsState, NoError>,
        source: SourceDescription,
        video: Bool,
        playAutomatically: Bool = false,
        enableSound: Bool,
@@ -175,7 +214,13 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
            assertionFailure()
        }
        self.videoRenderer = playerNode.videoLayer ?? AVSampleBufferDisplayLayer()
        self.videoNode = playerNode

        switch source {
        case let .externalParts(partsState):
            self.source = ChunkMediaPlayerExternalSourceImpl(partsState: partsState)
        case let .directFetch(resource):
            self.source = ChunkMediaPlayerDirectFetchSourceImpl(resource: resource)
        }

        self.updateTimer = Foundation.Timer.scheduledTimer(withTimeInterval: 1.0 / 60.0, repeats: true, block: { [weak self] _ in
            guard let self else {
@@ -184,7 +229,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
            self.updateInternalState()
        })

        self.partsStateDisposable = (partsState
        self.partsStateDisposable = (self.source.partsState
        |> deliverOnMainQueue).startStrict(next: { [weak self] partsState in
            guard let self else {
                return
@@ -291,6 +336,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
        }
        let timestampSeconds = timestamp.seconds

        self.source.updatePlaybackState(
            position: timestampSeconds,
            isPlaying: self.isPlaying
        )

        var duration: Double = 0.0
        if let partsStateDuration = self.partsState.duration {
            duration = partsStateDuration
@@ -318,7 +368,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                    startTime: part.startTime,
                    clippedStartTime: partStartTime == part.startTime ? nil : partStartTime,
                    endTime: part.endTime,
                    file: part.file,
                    content: part.content,
                    codecName: part.codecName
                ))
                minStartTime = max(minStartTime, partEndTime)
@@ -340,7 +390,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                    startTime: part.startTime,
                    clippedStartTime: partStartTime == part.startTime ? nil : partStartTime,
                    endTime: part.endTime,
                    file: part.file,
                    content: part.content,
                    codecName: part.codecName
                ))
                minStartTime = max(minStartTime, partEndTime)
@@ -385,7 +435,12 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
        for part in loadedParts {
            if let loadedPart = loadedPartsMediaData.parts[part.part.id] {
                if let audio = loadedPart.audio, audio.didBeginReading, !isSoundEnabled {
                    let cleanAudio = LoadedPart.Media(queue: dataQueue, tempFile: part.part.file, mediaType: .audio, codecName: part.part.codecName)
                    let cleanAudio = LoadedPart.Media(
                        queue: dataQueue,
                        content: part.part.content,
                        mediaType: .audio,
                        codecName: part.part.codecName
                    )
                    cleanAudio.load()

                    loadedPartsMediaData.parts[part.part.id] = LoadedPart.MediaData(
@@ -395,10 +450,20 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                    )
                }
            } else {
                let video = LoadedPart.Media(queue: dataQueue, tempFile: part.part.file, mediaType: .video, codecName: part.part.codecName)
                let video = LoadedPart.Media(
                    queue: dataQueue,
                    content: part.part.content,
                    mediaType: .video,
                    codecName: part.part.codecName
                )
                video.load()

                let audio = LoadedPart.Media(queue: dataQueue, tempFile: part.part.file, mediaType: .audio, codecName: part.part.codecName)
                let audio = LoadedPart.Media(
                    queue: dataQueue,
                    content: part.part.content,
                    mediaType: .audio,
                    codecName: part.part.codecName
                )
                audio.load()

                loadedPartsMediaData.parts[part.part.id] = LoadedPart.MediaData(
@@ -680,8 +745,8 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {

            videoTarget.requestMediaDataWhenReady(on: self.dataQueue.queue, using: { [weak self] in
                if let loadedPartsMediaData = loadedPartsMediaData.unsafeGet() {
                    let fillResult = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: videoTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: true)
                    if fillResult.isReadyForMoreData {
                    let bufferIsReadyForMoreData = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: videoTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: true)
                    if bufferIsReadyForMoreData {
                        videoTarget.stopRequestingMediaData()
                        Queue.mainQueue().async {
                            guard let self else {
@@ -698,12 +763,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
        if !self.audioIsRequestingMediaData, let audioRenderer = self.audioRenderer {
            self.audioIsRequestingMediaData = true
            let loadedPartsMediaData = self.loadedPartsMediaData
            let reportedDidEnqueueVideo = self.reportedDidEnqueueVideo
            let audioTarget = audioRenderer
            audioTarget.requestMediaDataWhenReady(on: self.dataQueue.queue, using: { [weak self] in
                if let loadedPartsMediaData = loadedPartsMediaData.unsafeGet() {
                    let fillResult = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: audioTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: false)
                    if fillResult.isReadyForMoreData {
                    let bufferIsReadyForMoreData = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: audioTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: false)
                    if bufferIsReadyForMoreData {
                        audioTarget.stopRequestingMediaData()
                        Queue.mainQueue().async {
                            guard let self else {
@@ -713,28 +777,13 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                            self.updateInternalState()
                        }
                    }
                    if fillResult.didEnqueue && !reportedDidEnqueueVideo {
                        Queue.mainQueue().async {
                            guard let self else {
                                return
                            }
                            self.reportedDidEnqueueVideo = true
                            if #available(iOS 17.4, *) {
                            } else {
                                if let videoNode = self.videoNode {
                                    videoNode.notifyHasSentFramesToDisplay()
                                }
                            }
                        }
                    }
                }
            })
        }
    }

    private static func fillRendererBuffer(bufferTarget: AVQueuedSampleBufferRendering, loadedPartsMediaData: LoadedPartsMediaData, isVideo: Bool) -> (isReadyForMoreData: Bool, didEnqueue: Bool) {
    private static func fillRendererBuffer(bufferTarget: AVQueuedSampleBufferRendering, loadedPartsMediaData: LoadedPartsMediaData, isVideo: Bool) -> Bool {
        var bufferIsReadyForMoreData = true
        var didEnqeue = false
        outer: while true {
            if !bufferTarget.isReadyForMoreMediaData {
                bufferIsReadyForMoreData = false
@@ -774,7 +823,9 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                        continue outer
                    }
                }
                didEnqeue = true
                /*if !isVideo {
                    print("Enqueue audio \(CMSampleBufferGetPresentationTimeStamp(sampleBuffer).value) next: \(CMSampleBufferGetPresentationTimeStamp(sampleBuffer).value + 1024)")
                }*/
                bufferTarget.enqueue(sampleBuffer)
                hasData = true
                continue outer

@@ -787,7 +838,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
            }
        }

        return (bufferIsReadyForMoreData, didEnqeue)
        return bufferIsReadyForMoreData
    }
}

@@ -184,6 +184,10 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa
            }
            fetchedCount = Int32(fetchedData.count)
            context.readingOffset += Int64(fetchedCount)

            if fetchedCount == 0 {
                return FFMPEG_CONSTANT_AVERROR_EOF
            }
        }

        if context.closed {
@@ -19,6 +19,7 @@ public protocol MediaDataReader: AnyObject {
}

public final class FFMpegMediaDataReader: MediaDataReader {
    private let content: ChunkMediaPlayerPart.Content
    private let isVideo: Bool
    private let videoSource: SoftwareVideoReader?
    private let audioSource: SoftwareAudioSource?

@@ -31,15 +32,42 @@ public final class FFMpegMediaDataReader: MediaDataReader {
        return self.audioSource != nil
    }

    public init(filePath: String, isVideo: Bool, codecName: String?) {
    public init(content: ChunkMediaPlayerPart.Content, isVideo: Bool, codecName: String?) {
        self.content = content
        self.isVideo = isVideo

        let filePath: String
        var focusedPart: MediaStreamFocusedPart?
        switch content {
        case let .tempFile(tempFile):
            filePath = tempFile.file.path
        case let .directFile(directFile):
            filePath = directFile.path

            let stream = isVideo ? directFile.video : directFile.audio
            guard let stream else {
                self.videoSource = nil
                self.audioSource = nil
                return
            }

            focusedPart = MediaStreamFocusedPart(
                seekStreamIndex: stream.index,
                startPts: stream.startPts,
                endPts: stream.endPts
            )
        }

        if self.isVideo {
            var passthroughDecoder = true
            if (codecName == "av1" || codecName == "av01") && !internal_isHardwareAv1Supported {
                passthroughDecoder = false
            }
            let videoSource = SoftwareVideoReader(path: filePath, hintVP9: false, passthroughDecoder: passthroughDecoder)
            if codecName == "vp9" || codecName == "vp8" {
                passthroughDecoder = false
            }

            let videoSource = SoftwareVideoReader(path: filePath, hintVP9: false, passthroughDecoder: passthroughDecoder, focusedPart: focusedPart)
            if videoSource.hasStream {
                self.videoSource = videoSource
            } else {

@@ -47,7 +75,7 @@ public final class FFMpegMediaDataReader: MediaDataReader {
            }
            self.audioSource = nil
        } else {
            let audioSource = SoftwareAudioSource(path: filePath)
            let audioSource = SoftwareAudioSource(path: filePath, focusedPart: focusedPart)
            if audioSource.hasStream {
                self.audioSource = audioSource
            } else {
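The focused-part plumbing above is what lets one physical file serve many logical parts: instead of a temp file per chunk, the reader is pointed at a single stream and a pts window of the shared file. A minimal sketch of the idea with the audio reader (the concrete values are placeholders):

    // Sketch: read only the audio between two pts values of one shared file.
    func readAudioWindow(path: String, streamIndex: Int, startPts: CMTime, endPts: CMTime) {
        let focusedPart = MediaStreamFocusedPart(seekStreamIndex: streamIndex, startPts: startPts, endPts: endPts)
        let audioSource = SoftwareAudioSource(path: path, focusedPart: focusedPart)
        while let frameData = audioSource.readFrame() {
            // frames stop arriving once a packet's pts reaches endPts (hasReadToEnd is set)
            _ = frameData
        }
    }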
@@ -444,11 +444,4 @@ public final class MediaPlayerNode: ASDisplayNode {
        }
        self.updateVideoInHierarchy?(self.videoInHierarchy || self.canPlaybackWithoutHierarchy)
    }

    func notifyHasSentFramesToDisplay() {
        if !self.didNotifyVideoLayerReadyForDisplay {
            self.didNotifyVideoLayerReadyForDisplay = true
            self.hasSentFramesToDisplay?()
        }
    }
}

@@ -9,7 +9,7 @@ import CoreMedia
import SwiftSignalKit
import FFMpegBinding

private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
private func SoftwareVideoSource_readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
    let context = Unmanaged<SoftwareVideoSource>.fromOpaque(userData!).takeUnretainedValue()
    if let fd = context.fd {
        let result = read(fd, buffer, Int(bufferSize))

@@ -21,7 +21,7 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa
    return FFMPEG_CONSTANT_AVERROR_EOF
}

private func seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
private func SoftwareVideoSource_seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
    let context = Unmanaged<SoftwareVideoSource>.fromOpaque(userData!).takeUnretainedValue()
    if let fd = context.fd {
        if (whence & FFMPEG_AVSEEK_SIZE) != 0 {

@@ -102,7 +102,7 @@ public final class SoftwareVideoSource {
        }
        let ioBufferSize = 64 * 1024

        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback, isSeekable: true)
        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: SoftwareVideoSource_readPacketCallback, writePacket: nil, seek: SoftwareVideoSource_seekCallback, isSeekable: true)
        self.avIoContext = avIoContext

        avFormatContext.setIO(self.avIoContext!)
@@ -356,7 +356,33 @@ private final class SoftwareAudioStream {
    }
}

private func SoftwareAudioSource_readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
    let context = Unmanaged<SoftwareAudioSource>.fromOpaque(userData!).takeUnretainedValue()
    if let fd = context.fd {
        let result = read(fd, buffer, Int(bufferSize))
        if result == 0 {
            return FFMPEG_CONSTANT_AVERROR_EOF
        }
        return Int32(result)
    }
    return FFMPEG_CONSTANT_AVERROR_EOF
}

private func SoftwareAudioSource_seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
    let context = Unmanaged<SoftwareAudioSource>.fromOpaque(userData!).takeUnretainedValue()
    if let fd = context.fd {
        if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
            return Int64(context.size)
        } else {
            lseek(fd, off_t(offset), SEEK_SET)
            return offset
        }
    }
    return 0
}

public final class SoftwareAudioSource {
    private let focusedPart: MediaStreamFocusedPart?
    private var readingError = false
    private var audioStream: SoftwareAudioStream?
    private var avIoContext: FFMpegAVIOContext?

@@ -371,9 +397,11 @@ public final class SoftwareAudioSource {
        return self.audioStream != nil
    }

    public init(path: String) {
    public init(path: String, focusedPart: MediaStreamFocusedPart?) {
        let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals

        self.focusedPart = focusedPart

        var s = stat()
        stat(path, &s)
        self.size = Int32(s.st_size)

@@ -391,7 +419,7 @@ public final class SoftwareAudioSource {

        let ioBufferSize = 64 * 1024

        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback, isSeekable: true)
        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: SoftwareAudioSource_readPacketCallback, writePacket: nil, seek: SoftwareAudioSource_seekCallback, isSeekable: true)
        self.avIoContext = avIoContext

        avFormatContext.setIO(self.avIoContext!)

@@ -438,8 +466,12 @@ public final class SoftwareAudioSource {

        self.audioStream = audioStream

        if let audioStream = self.audioStream {
            avFormatContext.seekFrame(forStreamIndex: Int32(audioStream.index), pts: 0, positionOnKeyframe: false)
        if let focusedPart = self.focusedPart {
            avFormatContext.seekFrame(forStreamIndex: Int32(focusedPart.seekStreamIndex), pts: focusedPart.startPts.value, positionOnKeyframe: true)
        } else {
            if let audioStream = self.audioStream {
                avFormatContext.seekFrame(forStreamIndex: Int32(audioStream.index), pts: 0, positionOnKeyframe: false)
            }
        }
    }
@@ -462,15 +494,18 @@ public final class SoftwareAudioSource {
        }
    }

    func readDecodableFrame() -> (MediaTrackDecodableFrame?, Bool) {
    func readDecodableFrame() -> MediaTrackDecodableFrame? {
        var frames: [MediaTrackDecodableFrame] = []
        var endOfStream = false

        while !self.readingError && frames.isEmpty {
        while !self.readingError && !self.hasReadToEnd && frames.isEmpty {
            if let packet = self.readPacketInternal() {
                if let audioStream = audioStream, Int(packet.streamIndex) == audioStream.index {
                if let audioStream = self.audioStream, Int(packet.streamIndex) == audioStream.index {
                    let packetPts = packet.pts

                    if let focusedPart = self.focusedPart, packetPts >= focusedPart.endPts.value {
                        self.hasReadToEnd = true
                    }

                    let pts = CMTimeMake(value: packetPts, timescale: audioStream.timebase.timescale)
                    let dts = CMTimeMake(value: packet.dts, timescale: audioStream.timebase.timescale)

@@ -487,21 +522,11 @@ public final class SoftwareAudioSource {
                    frames.append(frame)
                }
            } else {
                if endOfStream {
                    break
                } else {
                    if let _ = self.avFormatContext, let _ = self.audioStream {
                        endOfStream = true
                        break
                    } else {
                        endOfStream = true
                        break
                    }
                }
                break
            }
        }

        return (frames.first, endOfStream)
        return frames.first
    }

    public func readFrame() -> Data? {
@@ -509,8 +534,7 @@ public final class SoftwareAudioSource {
            return nil
        }

        let (decodableFrame, _) = self.readDecodableFrame()
        if let decodableFrame = decodableFrame {
        if let decodableFrame = self.readDecodableFrame() {
            return audioStream.decoder.decodeRaw(frame: decodableFrame)
        } else {
            return nil

@@ -523,8 +547,7 @@ public final class SoftwareAudioSource {
        }

        while true {
            let (decodableFrame, _) = self.readDecodableFrame()
            if let decodableFrame = decodableFrame {
            if let decodableFrame = self.readDecodableFrame() {
                if audioStream.decoder.send(frame: decodableFrame) {
                    if let result = audioStream.decoder.decode() {
                        return result.sampleBuffer

@@ -541,8 +564,7 @@ public final class SoftwareAudioSource {
            return nil
        }

        let (decodableFrame, _) = self.readDecodableFrame()
        if let decodableFrame = decodableFrame {
        if let decodableFrame = self.readDecodableFrame() {
            return (decodableFrame.copyPacketData(), Int(decodableFrame.packet.duration))
        } else {
            return nil
@@ -557,7 +579,45 @@ public final class SoftwareAudioSource {
    }
}

public struct MediaStreamFocusedPart {
    public let seekStreamIndex: Int
    public let startPts: CMTime
    public let endPts: CMTime

    public init(seekStreamIndex: Int, startPts: CMTime, endPts: CMTime) {
        self.seekStreamIndex = seekStreamIndex
        self.startPts = startPts
        self.endPts = endPts
    }
}

private func SoftwareVideoReader_readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
    let context = Unmanaged<SoftwareVideoReader>.fromOpaque(userData!).takeUnretainedValue()
    if let fd = context.fd {
        let result = read(fd, buffer, Int(bufferSize))
        if result == 0 {
            return FFMPEG_CONSTANT_AVERROR_EOF
        }
        return Int32(result)
    }
    return FFMPEG_CONSTANT_AVERROR_EOF
}

private func SoftwareVideoReader_seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
    let context = Unmanaged<SoftwareVideoReader>.fromOpaque(userData!).takeUnretainedValue()
    if let fd = context.fd {
        if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
            return Int64(context.size)
        } else {
            lseek(fd, off_t(offset), SEEK_SET)
            return offset
        }
    }
    return 0
}

final class SoftwareVideoReader {
    private let focusedPart: MediaStreamFocusedPart?
    private var readingError = false
    private var videoStream: SoftwareVideoStream?
    private var avIoContext: FFMpegAVIOContext?
@@ -576,9 +636,11 @@ final class SoftwareVideoReader {
        return self.videoStream != nil
    }

    public init(path: String, hintVP9: Bool, passthroughDecoder: Bool = false) {
    public init(path: String, hintVP9: Bool, passthroughDecoder: Bool = false, focusedPart: MediaStreamFocusedPart?) {
        let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals

        self.focusedPart = focusedPart

        var s = stat()
        stat(path, &s)
        self.size = Int32(s.st_size)

@@ -598,7 +660,7 @@ final class SoftwareVideoReader {
        }
        let ioBufferSize = 64 * 1024

        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback, isSeekable: true)
        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: SoftwareVideoReader_readPacketCallback, writePacket: nil, seek: SoftwareVideoReader_seekCallback, isSeekable: true)
        self.avIoContext = avIoContext

        avFormatContext.setIO(self.avIoContext!)

@@ -675,8 +737,12 @@ final class SoftwareVideoReader {

        self.videoStream = videoStream

        if let videoStream = self.videoStream {
            avFormatContext.seekFrame(forStreamIndex: Int32(videoStream.index), pts: 0, positionOnKeyframe: true)
        if let focusedPart = self.focusedPart {
            avFormatContext.seekFrame(forStreamIndex: Int32(focusedPart.seekStreamIndex), pts: focusedPart.startPts.value, positionOnKeyframe: true)
        } else {
            if let videoStream = self.videoStream {
                avFormatContext.seekFrame(forStreamIndex: Int32(videoStream.index), pts: 0, positionOnKeyframe: true)
            }
        }
    }

@@ -709,6 +775,10 @@ final class SoftwareVideoReader {
        if let videoStream = self.videoStream, Int(packet.streamIndex) == videoStream.index {
            let packetPts = packet.pts

            if let focusedPart = self.focusedPart, packetPts >= focusedPart.endPts.value {
                self.hasReadToEnd = true
            }

            let pts = CMTimeMake(value: packetPts, timescale: videoStream.timebase.timescale)
            let dts = CMTimeMake(value: packet.dts, timescale: videoStream.timebase.timescale)
@@ -784,8 +854,11 @@ final class SoftwareVideoReader {

public final class FFMpegMediaInfo {
    public struct Info {
        public let index: Int
        public let timescale: CMTimeScale
        public let startTime: CMTime
        public let duration: CMTime
        public let fps: CMTime
        public let codecName: String?
    }

@@ -863,7 +936,7 @@ public func extractFFMpegMediaInfo(path: String) -> FFMpegMediaInfo? {

    var streamInfos: [(isVideo: Bool, info: FFMpegMediaInfo.Info)] = []

    for typeIndex in 0 ..< 1 {
    for typeIndex in 0 ..< 2 {
        let isVideo = typeIndex == 0

        for streamIndexNumber in avFormatContext.streamIndices(for: isVideo ? FFMpegAVFormatStreamTypeVideo : FFMpegAVFormatStreamTypeAudio) {

@@ -873,7 +946,7 @@ public func extractFFMpegMediaInfo(path: String) -> FFMpegMediaInfo? {
            }

            let fpsAndTimebase = avFormatContext.fpsAndTimebase(forStreamIndex: streamIndex, defaultTimeBase: CMTimeMake(value: 1, timescale: 40000))
            let (_, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)
            let (fps, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)

            let startTime: CMTime
            let rawStartTime = avFormatContext.startTime(atStreamIndex: streamIndex)

@@ -895,9 +968,20 @@ public func extractFFMpegMediaInfo(path: String) -> FFMpegMediaInfo? {
                codecName = "hevc"
            } else if codecId == FFMpegCodecIdAV1 {
                codecName = "av1"
            } else if codecId == FFMpegCodecIdVP9 {
                codecName = "vp9"
            } else if codecId == FFMpegCodecIdVP8 {
                codecName = "vp8"
            }

            streamInfos.append((isVideo: isVideo, info: FFMpegMediaInfo.Info(startTime: startTime, duration: duration, codecName: codecName)))
            streamInfos.append((isVideo: isVideo, info: FFMpegMediaInfo.Info(
                index: Int(streamIndex),
                timescale: timebase.timescale,
                startTime: startTime,
                duration: duration,
                fps: fps,
                codecName: codecName
            )))
        }
    }
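With index, timescale, and fps now part of Info, extractFFMpegMediaInfo carries enough to drive the index-based segmentation above rather than only duration probing. A small hedged sketch:

    // Sketch: inspect a probed video stream; fps is a CMTime whose .seconds is the
    // frame rate (the segmenter computes frameDuration as 1.0 / fps.seconds).
    func logVideoInfo(path: String) {
        guard let info = extractFFMpegMediaInfo(path: path), let video = info.video else {
            return
        }
        print("video stream #\(video.index), codec \(video.codecName ?? "unknown"), \(video.duration.seconds)s at \(video.fps.seconds) fps")
    }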
@@ -70,6 +70,9 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa
        }
        let fetchedCount = Int32(fetchedData.count)
        context.readingOffset += Int64(fetchedCount)
        if fetchedCount == 0 {
            return FFMPEG_CONSTANT_AVERROR_EOF
        }
        return fetchedCount
    } else {
        return FFMPEG_CONSTANT_AVERROR_EOF
@@ -40,7 +40,8 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
    private let smallCircleSize: CGFloat

    private let backgroundView: BlurredBackgroundView
    private(set) var vibrancyEffectView: UIVisualEffectView?
    private let backgroundTintView: UIView
    let backgroundTintMaskContainer: UIView
    let vibrantExpandedContentContainer: UIView

    private let maskLayer: SimpleLayer

@@ -58,7 +59,10 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
        self.largeCircleSize = largeCircleSize
        self.smallCircleSize = smallCircleSize

        self.backgroundView = BlurredBackgroundView(color: .clear, enableBlur: true)
        self.backgroundView = BlurredBackgroundView(color: nil, enableBlur: true)

        self.backgroundTintView = UIView()
        self.backgroundTintMaskContainer = UIView()

        self.maskLayer = SimpleLayer()
        self.backgroundClippingLayer = SimpleLayer()

@@ -86,6 +90,7 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
        }

        self.vibrantExpandedContentContainer = UIView()
        self.backgroundTintMaskContainer.addSubview(self.vibrantExpandedContentContainer)

        super.init()

@@ -97,6 +102,10 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
        self.largeCircleShadowLayer.opacity = 0.0
        self.smallCircleShadowLayer.opacity = 0.0

        self.backgroundView.addSubview(self.backgroundTintView)

        self.backgroundTintMaskContainer.backgroundColor = .white

        self.view.addSubview(self.backgroundView)

        self.maskLayer.addSublayer(self.smallCircleLayer)
@ -132,31 +141,24 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
|
||||
if self.theme !== theme {
|
||||
self.theme = theme
|
||||
|
||||
if theme.overallDarkAppearance && !forceDark {
|
||||
if let vibrancyEffectView = self.vibrancyEffectView {
|
||||
self.vibrancyEffectView = nil
|
||||
vibrancyEffectView.removeFromSuperview()
|
||||
if theme.overallDarkAppearance {
|
||||
if let invertFilter = CALayer.colorInvert(), let filter = CALayer.luminanceToAlpha() {
|
||||
self.backgroundTintMaskContainer.layer.filters = [invertFilter, filter]
|
||||
}
|
||||
self.backgroundTintView.mask = self.backgroundTintMaskContainer
|
||||
|
||||
self.backgroundView.updateColor(color: theme.contextMenu.backgroundColor, forceKeepBlur: true, transition: .immediate)
|
||||
self.backgroundTintView.backgroundColor = UIColor(white: 1.0, alpha: 0.5)
|
||||
} else {
|
||||
if self.vibrancyEffectView == nil {
|
||||
let style: UIBlurEffect.Style
|
||||
if forceDark {
|
||||
style = .dark
|
||||
} else {
|
||||
style = .extraLight
|
||||
}
|
||||
let blurEffect = UIBlurEffect(style: style)
|
||||
let vibrancyEffect = UIVibrancyEffect(blurEffect: blurEffect)
|
||||
let vibrancyEffectView = UIVisualEffectView(effect: vibrancyEffect)
|
||||
self.vibrancyEffectView = vibrancyEffectView
|
||||
vibrancyEffectView.contentView.addSubview(self.vibrantExpandedContentContainer)
|
||||
self.backgroundView.addSubview(vibrancyEffectView)
|
||||
if let filter = CALayer.luminanceToAlpha() {
|
||||
self.backgroundTintMaskContainer.layer.filters = [filter]
|
||||
}
|
||||
self.backgroundTintView.mask = self.backgroundTintMaskContainer
|
||||
|
||||
self.backgroundView.updateColor(color: .clear, forceKeepBlur: true, transition: .immediate)
|
||||
self.backgroundTintView.backgroundColor = theme.contextMenu.backgroundColor
|
||||
}
|
||||
|
||||
self.backgroundView.updateColor(color: theme.contextMenu.backgroundColor, transition: .immediate)
|
||||
//self.backgroundView.updateColor(color: UIColor(white: 1.0, alpha: 0.0), forceKeepBlur: true, transition: .immediate)
|
||||
|
||||
let shadowColor = UIColor(white: 0.0, alpha: 0.4)
|
||||
|
||||
if let image = generateBubbleShadowImage(shadow: shadowColor, diameter: 46.0, shadowBlur: shadowInset) {
|
||||
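
Note: the dark-appearance branch above swaps the UIVisualEffectView-based vibrancy for a filtered mask. A minimal sketch of that pattern follows, assuming this repo's CALayer.luminanceToAlpha() and CALayer.colorInvert() helpers (thin wrappers over private CAFilter instances, so either can return nil); the function name is hypothetical.

// Sketch: build an alpha mask from a tint container via CoreAnimation filters.
// luminanceToAlpha maps white -> alpha 1 and black -> alpha 0; in dark mode the
// tint content is drawn black, so colorInvert is applied first.
import UIKit

func applyTintMask(to tintView: UIView, maskContainer: UIView, darkAppearance: Bool) {
    var filters: [NSObject] = []
    if darkAppearance, let invert = CALayer.colorInvert() {
        filters.append(invert)
    }
    if let luminanceToAlpha = CALayer.luminanceToAlpha() {
        filters.append(luminanceToAlpha)
    }
    maskContainer.layer.filters = filters
    tintView.mask = maskContainer
}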
@@ -213,9 +215,8 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
        transition.updateFrame(view: self.backgroundView, frame: contentBounds, beginWithCurrentState: true)
        self.backgroundView.update(size: contentBounds.size, transition: transition)

        if let vibrancyEffectView = self.vibrancyEffectView {
            transition.updateFrame(view: vibrancyEffectView, frame: CGRect(origin: CGPoint(x: 10.0, y: 10.0), size: contentBounds.size), beginWithCurrentState: true)
        }
        transition.updateFrame(view: self.backgroundTintView, frame: CGRect(origin: CGPoint(x: -contentBounds.minX, y: -contentBounds.minY), size: contentBounds.size))
        transition.updateFrame(view: self.backgroundTintMaskContainer, frame: CGRect(origin: CGPoint(), size: contentBounds.size))
    }

    func animateIn() {

@@ -130,7 +130,7 @@ private final class ExpandItemView: UIView {

    override init(frame: CGRect) {
        self.tintView = UIView()
        self.tintView.backgroundColor = .white
        self.tintView.backgroundColor = .black

        self.arrowView = UIImageView()
        self.arrowView.image = generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/ReactionExpandArrow"), color: .white)
@@ -187,9 +187,9 @@ private final class TitleLabelView: UIView {
            return nil
        })

        let tintBody = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .white)
        let tintBold = MarkdownAttributeSet(font: Font.semibold(13.0), textColor: .white)
        let tintLink = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .white, additionalAttributes: [TelegramTextAttributes.URL: true as NSNumber])
        let tintBody = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .black)
        let tintBold = MarkdownAttributeSet(font: Font.semibold(13.0), textColor: .black)
        let tintLink = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .black, additionalAttributes: [TelegramTextAttributes.URL: true as NSNumber])
        let tintAttributes = MarkdownAttributes(body: tintBody, bold: tintBold, link: tintLink, linkAttribute: { _ in
            return (TelegramTextAttributes.URL, "")
        })
@@ -1593,10 +1593,8 @@ public final class ReactionContextNode: ASDisplayNode, ASScrollViewDelegate {
            transition: transition
        )

        if let vibrancyEffectView = self.backgroundNode.vibrancyEffectView {
            if self.contentTintContainer.view.superview !== vibrancyEffectView.contentView {
                vibrancyEffectView.contentView.addSubview(self.contentTintContainer.view)
            }
        if self.contentTintContainer.view.superview !== self.backgroundNode.backgroundTintMaskContainer {
            self.backgroundNode.backgroundTintMaskContainer.addSubview(self.contentTintContainer.view)
        }

        if let animateInFromAnchorRect = animateInFromAnchorRect, !self.reduceMotion {
@@ -2431,7 +2429,7 @@ public final class ReactionContextNode: ASDisplayNode, ASScrollViewDelegate {
            chatPeerId: nil,
            peekBehavior: nil,
            customLayout: emojiContentLayout,
            externalBackground: self.backgroundNode.vibrancyEffectView == nil ? nil : EmojiPagerContentComponent.ExternalBackground(
            externalBackground: self.backgroundNode.backgroundTintMaskContainer.isHidden ? nil : EmojiPagerContentComponent.ExternalBackground(
                effectContainerView: self.backgroundNode.vibrantExpandedContentContainer
            ),
            externalExpansionView: self.view,

@@ -417,7 +417,7 @@ public final class EmojiKeyboardItemLayer: MultiAnimationRenderTarget {
            let color = theme.chat.inputMediaPanel.panelContentVibrantOverlayColor

            iconLayer.contents = generateIcon(color: color)?.cgImage
            tintIconLayer.contents = generateIcon(color: .white)?.cgImage
            tintIconLayer.contents = generateIcon(color: .black)?.cgImage

            tintIconLayer.isHidden = !needsVibrancy
        }

@@ -1342,9 +1342,10 @@ public final class EmojiPagerContentComponent: Component {
        private var isSearchActivated: Bool = false

        private let backgroundView: BlurredBackgroundView
        private let backgroundTintView: UIView
        private var fadingMaskLayer: FadingMaskLayer?
        private var vibrancyClippingView: UIView
        private var vibrancyEffectView: UIVisualEffectView?
        private var vibrancyEffectView: UIView?
        public private(set) var mirrorContentClippingView: UIView?
        private let mirrorContentScrollView: UIView
        private var warpView: WarpView?
@@ -1398,6 +1399,7 @@ public final class EmojiPagerContentComponent: Component {

        override init(frame: CGRect) {
            self.backgroundView = BlurredBackgroundView(color: nil)
            self.backgroundTintView = UIView()

            if ProcessInfo.processInfo.processorCount > 4 {
                self.shimmerHostView = PortalSourceView()
@@ -1423,6 +1425,7 @@ public final class EmojiPagerContentComponent: Component {

            super.init(frame: frame)

            self.backgroundView.addSubview(self.backgroundTintView)
            self.addSubview(self.backgroundView)

            if let shimmerHostView = self.shimmerHostView {
@@ -1618,7 +1621,7 @@ public final class EmojiPagerContentComponent: Component {
            if let mirrorContentClippingView = self.mirrorContentClippingView {
                mirrorContentClippingView.addSubview(self.mirrorContentScrollView)
            } else if let vibrancyEffectView = self.vibrancyEffectView {
                vibrancyEffectView.contentView.addSubview(self.mirrorContentScrollView)
                vibrancyEffectView.addSubview(self.mirrorContentScrollView)
            }

            mirrorContentWarpView.removeFromSuperview()
@@ -3172,7 +3175,7 @@ public final class EmojiPagerContentComponent: Component {
            }

            groupBorderLayer.strokeColor = borderColor.cgColor
            groupBorderLayer.tintContainerLayer.strokeColor = UIColor.white.cgColor
            groupBorderLayer.tintContainerLayer.strokeColor = UIColor.black.cgColor
            groupBorderLayer.lineWidth = 1.6
            groupBorderLayer.lineCap = .round
            groupBorderLayer.fillColor = nil
@@ -3584,7 +3587,7 @@ public final class EmojiPagerContentComponent: Component {
                itemSelectionLayer.tintContainerLayer.backgroundColor = UIColor.clear.cgColor
            } else {
                itemSelectionLayer.backgroundColor = keyboardChildEnvironment.theme.chat.inputMediaPanel.panelContentControlVibrantSelectionColor.cgColor
                itemSelectionLayer.tintContainerLayer.backgroundColor = UIColor(white: 1.0, alpha: 0.2).cgColor
                itemSelectionLayer.tintContainerLayer.backgroundColor = UIColor(white: 0.0, alpha: 0.2).cgColor
            }
        }

@@ -4009,15 +4012,15 @@ public final class EmojiPagerContentComponent: Component {
                }
            } else {
                if self.vibrancyEffectView == nil {
                    let style: UIBlurEffect.Style
                    style = .extraLight
                    let blurEffect = UIBlurEffect(style: style)
                    let vibrancyEffect = UIVibrancyEffect(blurEffect: blurEffect)
                    let vibrancyEffectView = UIVisualEffectView(effect: vibrancyEffect)
                    let vibrancyEffectView = UIView()
                    vibrancyEffectView.backgroundColor = .white
                    if let filter = CALayer.luminanceToAlpha() {
                        vibrancyEffectView.layer.filters = [filter]
                    }
                    self.vibrancyEffectView = vibrancyEffectView
                    self.backgroundView.addSubview(vibrancyEffectView)
                    self.backgroundTintView.mask = vibrancyEffectView
                    self.vibrancyClippingView.addSubview(self.mirrorContentScrollView)
                    vibrancyEffectView.contentView.addSubview(self.vibrancyClippingView)
                    vibrancyEffectView.addSubview(self.vibrancyClippingView)
                }
            }

@@ -4046,7 +4049,11 @@ public final class EmojiPagerContentComponent: Component {
            if hideBackground {
                backgroundColor = backgroundColor.withAlphaComponent(0.01)
            }
            self.backgroundView.updateColor(color: backgroundColor, enableBlur: true, forceKeepBlur: false, transition: transition.containedViewLayoutTransition)

            self.backgroundTintView.backgroundColor = backgroundColor
            transition.setFrame(view: self.backgroundTintView, frame: CGRect(origin: CGPoint(), size: backgroundFrame.size))

            self.backgroundView.updateColor(color: .clear, enableBlur: true, forceKeepBlur: true, transition: transition.containedViewLayoutTransition)
            transition.setFrame(view: self.backgroundView, frame: backgroundFrame)
            self.backgroundView.update(size: backgroundFrame.size, transition: transition.containedViewLayoutTransition)
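
Note: the frame updates above exist because a view installed as another view's mask sits outside the normal layout pass; UIKit never resizes it automatically. A generic illustration with hypothetical names:

// A `mask` view must track the masked view's bounds on every layout pass,
// otherwise the tint is clipped incorrectly after resizes or transitions.
import UIKit

final class TintedContainerView: UIView { // hypothetical, not from this diff
    let tintView = UIView()
    let tintMask = UIView()

    override init(frame: CGRect) {
        super.init(frame: frame)
        self.addSubview(self.tintView)
        self.tintView.mask = self.tintMask
    }

    required init?(coder: NSCoder) {
        fatalError("init(coder:) has not been implemented")
    }

    override func layoutSubviews() {
        super.layoutSubviews()
        self.tintView.frame = self.bounds
        // Keep the mask congruent with the view it masks.
        self.tintMask.frame = self.tintView.bounds
    }
}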
@@ -4652,7 +4659,7 @@ public final class EmojiPagerContentComponent: Component {
            if let mirrorContentClippingView = self.mirrorContentClippingView {
                mirrorContentClippingView.addSubview(visibleEmptySearchResultsView.tintContainerView)
            } else if let vibrancyEffectView = self.vibrancyEffectView {
                vibrancyEffectView.contentView.addSubview(visibleEmptySearchResultsView.tintContainerView)
                vibrancyEffectView.addSubview(visibleEmptySearchResultsView.tintContainerView)
            }
        }
        let emptySearchResultsSize = CGSize(width: availableSize.width, height: availableSize.height - itemLayout.searchInsets.top - itemLayout.searchHeight)

@@ -389,7 +389,7 @@ public final class EmojiSearchHeaderView: UIView, UITextFieldDelegate {
            self.clearIconView.image = generateTintedImage(image: UIImage(bundleImageName: "Components/Search Bar/Clear"), color: .white)?.withRenderingMode(.alwaysTemplate)
            self.clearIconView.tintColor = useOpaqueTheme ? theme.chat.inputMediaPanel.panelContentOpaqueSearchOverlayColor : theme.chat.inputMediaPanel.panelContentVibrantSearchOverlayColor

            self.clearIconTintView.image = generateTintedImage(image: UIImage(bundleImageName: "Components/Search Bar/Clear"), color: .white)
            self.clearIconTintView.image = generateTintedImage(image: UIImage(bundleImageName: "Components/Search Bar/Clear"), color: .black)
        }

        self.params = params
@@ -402,13 +402,13 @@ public final class EmojiSearchHeaderView: UIView, UITextFieldDelegate {

        if theme.overallDarkAppearance && forceNeedsVibrancy {
            self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlVibrantSelectionColor.withMultipliedAlpha(0.3).cgColor
            self.tintBackgroundLayer.backgroundColor = UIColor(white: 1.0, alpha: 0.2).cgColor
            self.tintBackgroundLayer.backgroundColor = UIColor(white: 0.0, alpha: 0.2).cgColor
        } else if useOpaqueTheme {
            self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlOpaqueSelectionColor.cgColor
            self.tintBackgroundLayer.backgroundColor = UIColor.white.cgColor
            self.tintBackgroundLayer.backgroundColor = UIColor.black.cgColor
        } else {
            self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlVibrantSelectionColor.cgColor
            self.tintBackgroundLayer.backgroundColor = UIColor(white: 1.0, alpha: 0.2).cgColor
            self.tintBackgroundLayer.backgroundColor = UIColor(white: 0.0, alpha: 0.2).cgColor
        }

        self.backgroundLayer.cornerRadius = inputHeight * 0.5
@@ -436,7 +436,7 @@ public final class EmojiSearchHeaderView: UIView, UITextFieldDelegate {
            component: AnyComponent(Text(
                text: strings.Common_Cancel,
                font: Font.regular(17.0),
                color: .white
                color: .black
            )),
            environment: {},
            containerSize: CGSize(width: size.width - 32.0, height: 100.0)

@@ -514,7 +514,7 @@ final class EmojiSearchSearchBarComponent: Component {
                containerSize: itemLayout.itemSize
            )

            itemView.tintView.tintColor = .white
            itemView.tintView.tintColor = .black

            if let view = itemView.view.view as? LottieComponent.View {
                if view.superview == nil {
@@ -592,7 +592,7 @@ final class EmojiSearchSearchBarComponent: Component {
            let selectedItemCenter = itemLayout.frame(at: index).center
            let selectionSize = CGSize(width: 28.0, height: 28.0)
            self.selectedItemBackground.backgroundColor = selectedColor.cgColor
            self.selectedItemTintBackground.backgroundColor = UIColor(white: 1.0, alpha: 0.15).cgColor
            self.selectedItemTintBackground.backgroundColor = UIColor(white: 0.0, alpha: 0.15).cgColor
            self.selectedItemBackground.cornerRadius = selectionSize.height * 0.5
            self.selectedItemTintBackground.cornerRadius = selectionSize.height * 0.5

@@ -678,7 +678,7 @@ final class EmojiSearchSearchBarComponent: Component {
            component: AnyComponent(Text(
                text: component.strings.Common_Search,
                font: Font.regular(17.0),
                color: .white
                color: .black
            )),
            environment: {},
            containerSize: CGSize(width: availableSize.width - 32.0, height: 100.0)

@@ -443,7 +443,7 @@ final class EmojiSearchStatusComponent: Component {
            overlayColor = component.useOpaqueTheme ? component.theme.chat.inputMediaPanel.panelContentOpaqueSearchOverlayColor : component.theme.chat.inputMediaPanel.panelContentVibrantSearchOverlayColor
        }

        let baseColor: UIColor = .white
        let baseColor: UIColor = .black

        if self.contentView.tintColor != overlayColor {
            self.contentView.tintColor = overlayColor

@@ -68,7 +68,7 @@ final class EmptySearchResultsView: UIView {
        )
        let _ = self.titleTintLabel.update(
            transition: .immediate,
            component: AnyComponent(Text(text: text, font: Font.regular(15.0), color: .white)),
            component: AnyComponent(Text(text: text, font: Font.regular(15.0), color: .black)),
            environment: {},
            containerSize: CGSize(width: size.width, height: 100.0)
        )

@@ -88,7 +88,7 @@ final class GroupExpandActionButton: UIButton {
        } else {
            self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlVibrantOverlayColor.cgColor
        }
        self.tintContainerLayer.backgroundColor = UIColor.white.cgColor
        self.tintContainerLayer.backgroundColor = UIColor.black.cgColor

        let textSize: CGSize
        if let currentTextLayout = self.currentTextLayout, currentTextLayout.string == title, currentTextLayout.color == color, currentTextLayout.constrainedWidth == textConstrainedWidth {

@@ -100,7 +100,7 @@ final class GroupHeaderActionButton: UIButton {
        }

        self.backgroundLayer.backgroundColor = backgroundColor.cgColor
        self.tintBackgroundLayer.backgroundColor = UIColor.white.withAlphaComponent(0.2).cgColor
        self.tintBackgroundLayer.backgroundColor = UIColor.black.withAlphaComponent(0.2).cgColor

        self.tintContainerLayer.isHidden = !needsVibrancy

@@ -110,7 +110,7 @@ final class GroupHeaderActionButton: UIButton {
        } else {
            let font: UIFont = compact ? Font.medium(11.0) : Font.semibold(15.0)
            let string = NSAttributedString(string: title.uppercased(), font: font, textColor: foregroundColor)
            let tintString = NSAttributedString(string: title.uppercased(), font: font, textColor: .white)
            let tintString = NSAttributedString(string: title.uppercased(), font: font, textColor: .black)
            let stringBounds = string.boundingRect(with: CGSize(width: textConstrainedWidth, height: 100.0), options: .usesLineFragmentOrigin, context: nil)
            textSize = CGSize(width: ceil(stringBounds.width), height: ceil(stringBounds.height))
            self.textLayer.contents = generateImage(textSize, opaque: false, scale: 0.0, rotatedContext: { size, context in

@@ -172,7 +172,7 @@ final class GroupHeaderLayer: UIView {
                clearSize = image.size
                clearIconLayer.contents = image.cgImage
            }
            if updateImage, let image = PresentationResourcesChat.chatInputMediaPanelGridDismissImage(theme, color: .white) {
            if updateImage, let image = PresentationResourcesChat.chatInputMediaPanelGridDismissImage(theme, color: .black) {
                tintClearIconLayer.contents = image.cgImage
            }

@@ -215,7 +215,7 @@ final class GroupHeaderLayer: UIView {
                stringValue = title
            }
            let string = NSAttributedString(string: stringValue, font: font, textColor: color)
            let whiteString = NSAttributedString(string: stringValue, font: font, textColor: .white)
            let whiteString = NSAttributedString(string: stringValue, font: font, textColor: .black)
            let stringBounds = string.boundingRect(with: CGSize(width: textConstrainedWidth, height: 18.0), options: [.usesLineFragmentOrigin, .truncatesLastVisibleLine], context: nil)
            textSize = CGSize(width: ceil(stringBounds.width), height: ceil(stringBounds.height))
            self.textLayer.contents = generateImage(textSize, opaque: false, scale: 0.0, rotatedContext: { size, context in
@@ -231,7 +231,6 @@ final class GroupHeaderLayer: UIView {
                context.clear(CGRect(origin: CGPoint(), size: size))
                UIGraphicsPushContext(context)

                //whiteString.draw(in: stringBounds)
                whiteString.draw(with: stringBounds, options: [.usesLineFragmentOrigin, .truncatesLastVisibleLine], context: nil)

                UIGraphicsPopContext()
@@ -287,7 +286,7 @@ final class GroupHeaderLayer: UIView {
                self.tintBadgeLayer = tintBadgeLayer
                self.tintContentLayer.addSublayer(tintBadgeLayer)

                if let image = generateBadgeImage(color: .white) {
                if let image = generateBadgeImage(color: .black) {
                    tintBadgeLayer.contents = image.cgImage
                }
            }
@@ -342,7 +341,7 @@ final class GroupHeaderLayer: UIView {
                self.tintLockIconLayer = tintLockIconLayer
                self.tintContentLayer.addSublayer(tintLockIconLayer)
            }
            if let image = PresentationResourcesChat.chatEntityKeyboardLock(theme, color: .white) {
            if let image = PresentationResourcesChat.chatEntityKeyboardLock(theme, color: .black) {
                tintLockIconLayer.contents = image.cgImage
                tintLockIconLayer.frame = lockIconLayer.frame
                tintLockIconLayer.isHidden = !needsVibrancy
@@ -368,7 +367,7 @@ final class GroupHeaderLayer: UIView {
            subtitleSize = currentSubtitleLayout.size
        } else {
            let string = NSAttributedString(string: subtitle, font: Font.regular(15.0), textColor: subtitleColor)
            let whiteString = NSAttributedString(string: subtitle, font: Font.regular(15.0), textColor: .white)
            let whiteString = NSAttributedString(string: subtitle, font: Font.regular(15.0), textColor: .black)
            let stringBounds = string.boundingRect(with: CGSize(width: textConstrainedWidth, height: 100.0), options: .usesLineFragmentOrigin, context: nil)
            subtitleSize = CGSize(width: ceil(stringBounds.width), height: ceil(stringBounds.height))
            updateSubtitleContents = generateImage(subtitleSize, opaque: false, scale: 0.0, rotatedContext: { size, context in
@@ -493,7 +492,7 @@ final class GroupHeaderLayer: UIView {
            self.tintSeparatorLayer = tintSeparatorLayer
            self.tintContentLayer.addSublayer(tintSeparatorLayer)
        }
        tintSeparatorLayer.backgroundColor = UIColor.white.cgColor
        tintSeparatorLayer.backgroundColor = UIColor.black.cgColor
        tintSeparatorLayer.frame = CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: CGSize(width: size.width, height: UIScreenPixel))

        tintSeparatorLayer.isHidden = !needsVibrancy

@@ -27,8 +27,15 @@ public struct HLSCodecConfiguration {

public extension HLSCodecConfiguration {
    init(context: AccountContext) {
        var isHardwareAv1Supported = internal_isHardwareAv1Supported
        var isSoftwareAv1Supported = false
        /*var isSoftwareAv1Supported = false
        var isHardwareAv1Supported = false

        var length: Int = 4
        var cpuCount: UInt32 = 0
        sysctlbyname("hw.ncpu", &cpuCount, &length, nil, 0)
        if cpuCount >= 6 {
            isSoftwareAv1Supported = true
        }

        if let data = context.currentAppConfiguration.with({ $0 }).data, let value = data["ios_enable_hardware_av1"] as? Double {
            isHardwareAv1Supported = value != 0.0
@@ -37,7 +44,9 @@ public extension HLSCodecConfiguration {
            isSoftwareAv1Supported = value != 0.0
        }

        self.init(isHardwareAv1Supported: isHardwareAv1Supported, isSoftwareAv1Supported: isSoftwareAv1Supported)
        self.init(isHardwareAv1Supported: isHardwareAv1Supported, isSoftwareAv1Supported: isSoftwareAv1Supported)*/

        self.init(isHardwareAv1Supported: false, isSoftwareAv1Supported: false)
    }
}
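
Note: the disabled block above gated software AV1 decoding on the logical core count. For reference, the probe itself is plain Darwin API; a standalone sketch (the >= 6 threshold is the heuristic from the commented-out code, not a measured cutoff, and the function name is hypothetical):

// Query the logical CPU count via sysctlbyname("hw.ncpu"); software (dav1d)
// AV1 decoding is only plausible on higher-core devices.
import Darwin

func probableSoftwareAv1Support() -> Bool {
    var cpuCount: UInt32 = 0
    var length = MemoryLayout<UInt32>.size
    sysctlbyname("hw.ncpu", &cpuCount, &length, nil, 0)
    return cpuCount >= 6
}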
@@ -1076,7 +1076,7 @@ final class HLSVideoJSNativeContentNode: ASDisplayNode, UniversalVideoContentNode {
        var onSeeked: (() -> Void)?
        self.player = ChunkMediaPlayerV2(
            audioSessionManager: audioSessionManager,
            partsState: self.chunkPlayerPartsState.get(),
            source: .externalParts(self.chunkPlayerPartsState.get()),
            video: true,
            enableSound: self.enableSound,
            baseRate: baseRate,
@@ -1085,25 +1085,14 @@ final class HLSVideoJSNativeContentNode: ASDisplayNode, UniversalVideoContentNode {
            },
            playerNode: self.playerNode
        )
        /*self.player = ChunkMediaPlayerImpl(
            postbox: postbox,
            audioSessionManager: audioSessionManager,
            partsState: self.chunkPlayerPartsState.get(),
            video: true,
            enableSound: self.enableSound,
            baseRate: baseRate,
            onSeeked: {
                onSeeked?()
            },
            playerNode: self.playerNode
        )*/

        super.init()

        self.contextDisposable = SharedHLSVideoJSContext.shared.register(context: self)

        self.playerNode.frame = CGRect(origin: CGPoint(), size: self.intrinsicDimensions)
        var didProcessFramesToDisplay = false

        /*var didProcessFramesToDisplay = false
        self.playerNode.isHidden = true
        self.playerNode.hasSentFramesToDisplay = { [weak self] in
            guard let self, !didProcessFramesToDisplay else {
@@ -1111,7 +1100,7 @@ final class HLSVideoJSNativeContentNode: ASDisplayNode, UniversalVideoContentNode {
            }
            didProcessFramesToDisplay = true
            self.playerNode.isHidden = false
        }
        }*/

        //let thumbnailVideoReference = HLSVideoContent.minimizedHLSQuality(file: fileReference)?.file ?? fileReference

@@ -1843,7 +1832,7 @@ private final class SourceBuffer {
            let item = ChunkMediaPlayerPart(
                startTime: fragmentInfo.startTime.seconds,
                endTime: fragmentInfo.startTime.seconds + fragmentInfo.duration.seconds,
                file: tempFile,
                content: .tempFile(ChunkMediaPlayerPart.Content.TempFile(file: tempFile)),
                codecName: videoCodecName
            )
            self.items.append(item)

@@ -146,6 +146,137 @@ public final class NativeVideoContent: UniversalVideoContent {
    }
}

private enum PlayerImpl {
    case legacy(MediaPlayer)
    case chunked(ChunkMediaPlayerV2)

    var actionAtEnd: MediaPlayerActionAtEnd {
        get {
            switch self {
            case let .legacy(player):
                return player.actionAtEnd
            case let .chunked(player):
                return player.actionAtEnd
            }
        } set(value) {
            switch self {
            case let .legacy(player):
                player.actionAtEnd = value
            case let .chunked(player):
                player.actionAtEnd = value
            }
        }
    }

    var status: Signal<MediaPlayerStatus, NoError> {
        switch self {
        case let .legacy(player):
            return player.status
        case let .chunked(player):
            return player.status
        }
    }

    func play() {
        switch self {
        case let .legacy(player):
            player.play()
        case let .chunked(player):
            player.play()
        }
    }

    func pause() {
        switch self {
        case let .legacy(player):
            player.pause()
        case let .chunked(player):
            player.pause()
        }
    }

    func togglePlayPause(faded: Bool = false) {
        switch self {
        case let .legacy(player):
            player.togglePlayPause(faded: faded)
        case let .chunked(player):
            player.togglePlayPause(faded: faded)
        }
    }

    func playOnceWithSound(playAndRecord: Bool, seek: MediaPlayerSeek = .start) {
        switch self {
        case let .legacy(player):
            player.playOnceWithSound(playAndRecord: playAndRecord, seek: seek)
        case let .chunked(player):
            player.playOnceWithSound(playAndRecord: playAndRecord, seek: seek)
        }
    }

    func continueWithOverridingAmbientMode(isAmbient: Bool) {
        switch self {
        case let .legacy(player):
            player.continueWithOverridingAmbientMode(isAmbient: isAmbient)
        case let .chunked(player):
            player.continueWithOverridingAmbientMode(isAmbient: isAmbient)
        }
    }

    func continuePlayingWithoutSound(seek: MediaPlayerSeek = .start) {
        switch self {
        case let .legacy(player):
            player.continuePlayingWithoutSound(seek: seek)
        case let .chunked(player):
            player.continuePlayingWithoutSound(seek: seek)
        }
    }

    func seek(timestamp: Double, play: Bool? = nil) {
        switch self {
        case let .legacy(player):
            player.seek(timestamp: timestamp, play: play)
        case let .chunked(player):
            player.seek(timestamp: timestamp, play: play)
        }
    }

    func setForceAudioToSpeaker(_ value: Bool) {
        switch self {
        case let .legacy(player):
            player.setForceAudioToSpeaker(value)
        case let .chunked(player):
            player.setForceAudioToSpeaker(value)
        }
    }

    func setSoundMuted(soundMuted: Bool) {
        switch self {
        case let .legacy(player):
            player.setSoundMuted(soundMuted: soundMuted)
        case let .chunked(player):
            player.setSoundMuted(soundMuted: soundMuted)
        }
    }

    func setBaseRate(_ baseRate: Double) {
        switch self {
        case let .legacy(player):
            player.setBaseRate(baseRate)
        case let .chunked(player):
            player.setBaseRate(baseRate)
        }
    }

    func setContinuePlayingWithoutSoundOnLostAudioSession(_ value: Bool) {
        switch self {
        case let .legacy(player):
            player.setContinuePlayingWithoutSoundOnLostAudioSession(value)
        case let .chunked(player):
            player.setContinuePlayingWithoutSoundOnLostAudioSession(value)
        }
    }
}
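
Note: PlayerImpl forwards every shared member through an explicit switch, keeping both players as concrete types. A protocol could express the same surface without per-method switches, assuming MediaPlayer and ChunkMediaPlayerV2 could be conformed retroactively; the sketch below is hypothetical and not part of this diff.

// Hypothetical protocol-based alternative to the PlayerImpl enum. The enum was
// likely preferred because it needs no conformances on types owned elsewhere
// and keeps the two cases exhaustive at every call site.
protocol UniversalPlayerControls: AnyObject {
    var actionAtEnd: MediaPlayerActionAtEnd { get set }
    var status: Signal<MediaPlayerStatus, NoError> { get }
    func play()
    func pause()
    func seek(timestamp: Double, play: Bool?)
}

// extension MediaPlayer: UniversalPlayerControls {}
// extension ChunkMediaPlayerV2: UniversalPlayerControls {}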
private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {
    private let postbox: Postbox
    private let userLocation: MediaResourceUserLocation
@@ -165,7 +296,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {
    private let continuePlayingWithoutSoundOnLostAudioSession: Bool
    private let displayImage: Bool

    private var player: MediaPlayer
    private var player: PlayerImpl
    private var thumbnailPlayer: MediaPlayer?
    private let imageNode: TransformImageNode
    private let playerNode: MediaPlayerNode
@@ -252,7 +383,57 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {

        let selectedFile = fileReference.media

        self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, userLocation: userLocation, userContentType: userContentType, resourceReference: fileReference.resourceReference(selectedFile.resource), tempFilePath: tempFilePath, limitedFileRange: limitedFileRange, streamable: streamVideo, video: true, preferSoftwareDecoding: false, playAutomatically: false, enableSound: enableSound, baseRate: baseRate, fetchAutomatically: fetchAutomatically, soundMuted: soundMuted, ambient: beginWithAmbientSound, mixWithOthers: mixWithOthers, continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession, storeAfterDownload: storeAfterDownload, isAudioVideoMessage: isAudioVideoMessage)
        self.playerNode = MediaPlayerNode(backgroundThread: false, captureProtected: captureProtected)

        if !"".isEmpty {
            let mediaPlayer = MediaPlayer(
                audioSessionManager: audioSessionManager,
                postbox: postbox,
                userLocation: userLocation,
                userContentType: userContentType,
                resourceReference: fileReference.resourceReference(selectedFile.resource),
                tempFilePath: tempFilePath,
                limitedFileRange: limitedFileRange,
                streamable: streamVideo,
                video: true,
                preferSoftwareDecoding: false,
                playAutomatically: false,
                enableSound: enableSound,
                baseRate: baseRate,
                fetchAutomatically: fetchAutomatically,
                soundMuted: soundMuted,
                ambient: beginWithAmbientSound,
                mixWithOthers: mixWithOthers,
                continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession,
                storeAfterDownload: storeAfterDownload,
                isAudioVideoMessage: isAudioVideoMessage
            )
            self.player = .legacy(mediaPlayer)
            mediaPlayer.attachPlayerNode(self.playerNode)
        } else {
            let mediaPlayer = ChunkMediaPlayerV2(
                audioSessionManager: audioSessionManager,
                source: .directFetch(ChunkMediaPlayerV2.SourceDescription.ResourceDescription(
                    postbox: postbox,
                    reference: fileReference.resourceReference(selectedFile.resource),
                    userLocation: userLocation,
                    userContentType: userContentType,
                    statsCategory: statsCategoryForFileWithAttributes(fileReference.media.attributes),
                    fetchAutomatically: fetchAutomatically
                )),
                video: true,
                playAutomatically: false,
                enableSound: enableSound,
                baseRate: baseRate,
                soundMuted: soundMuted,
                ambient: beginWithAmbientSound,
                mixWithOthers: mixWithOthers,
                continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession,
                isAudioVideoMessage: isAudioVideoMessage,
                playerNode: self.playerNode
            )
            self.player = .chunked(mediaPlayer)
        }

        var actionAtEndImpl: (() -> Void)?
        if enableSound && !loopVideo {
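
Note: `!"".isEmpty` evaluates to a constant false, so the legacy MediaPlayer branch above is dead code that still type-checks, and, unlike a literal `if false`, it avoids the compiler's "will never be executed" warning. Spelled out with an explicit flag (hypothetical name):

// An always-false toggle that keeps the fallback branch compiling:
let useLegacyMediaPlayer = !"".isEmpty // constant false; flip by hand to fall back

if useLegacyMediaPlayer {
    // ... construct MediaPlayer and attach the player node, as in the branch above
} else {
    // ... construct ChunkMediaPlayerV2 with the .directFetch source, as above
}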
@@ -264,8 +445,6 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {
                actionAtEndImpl?()
            })
        }
        self.playerNode = MediaPlayerNode(backgroundThread: false, captureProtected: captureProtected)
        self.player.attachPlayerNode(self.playerNode)

        self.dimensions = fileReference.media.dimensions?.cgSize
        if let dimensions = self.dimensions {
@@ -274,7 +453,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {

        super.init()

        var didProcessFramesToDisplay = false
        /*var didProcessFramesToDisplay = false
        self.playerNode.isHidden = true
        self.playerNode.hasSentFramesToDisplay = { [weak self] in
            guard let self, !didProcessFramesToDisplay else {
@@ -283,7 +462,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {
            didProcessFramesToDisplay = true
            self.playerNode.isHidden = false
            self.hasSentFramesToDisplay?()
        }
        }*/

        if let dimensions = hintDimensions {
            self.dimensions = dimensions

@@ -29,6 +29,7 @@ UIView * _Nullable getPortalViewSourceView(UIView * _Nonnull portalView);

NSObject * _Nullable makeBlurFilter();
NSObject * _Nullable makeLuminanceToAlphaFilter();
NSObject * _Nullable makeColorInvertFilter();
NSObject * _Nullable makeMonochromeFilter();

void setLayerDisableScreenshots(CALayer * _Nonnull layer, bool disableScreenshots);

@@ -234,6 +234,10 @@ NSObject * _Nullable makeLuminanceToAlphaFilter() {
    return [(id<GraphicsFilterProtocol>)NSClassFromString(@"CAFilter") filterWithName:@"luminanceToAlpha"];
}

NSObject * _Nullable makeColorInvertFilter() {
    return [(id<GraphicsFilterProtocol>)NSClassFromString(@"CAFilter") filterWithName:@"colorInvert"];
}

NSObject * _Nullable makeMonochromeFilter() {
    return [(id<GraphicsFilterProtocol>)NSClassFromString(@"CAFilter") filterWithName:@"colorMonochrome"];
}
third-party/dav1d/build-dav1d-bazel.sh (vendored, 7 changed lines)
@@ -12,7 +12,12 @@ CROSSFILE=""
if [ "$ARCH" = "arm64" ]; then
    CROSSFILE="../package/crossfiles/arm64-iPhoneOS.meson"
elif [ "$ARCH" = "sim_arm64" ]; then
    CROSSFILE="../../arm64-iPhoneSimulator.meson"
    rm -f "arm64-iPhoneSimulator-custom.meson"
    TARGET_CROSSFILE="$BUILD_DIR/dav1d/package/crossfiles/arm64-iPhoneSimulator-custom.meson"
    cp "$BUILD_DIR/arm64-iPhoneSimulator.meson" "$TARGET_CROSSFILE"
    custom_xcode_path="$(xcode-select -p)/"
    sed -i '' "s|/Applications/Xcode.app/Contents/Developer/|$custom_xcode_path|g" "$TARGET_CROSSFILE"
    CROSSFILE="../package/crossfiles/arm64-iPhoneSimulator-custom.meson"
else
    echo "Unsupported architecture $ARCH"
    exit 1