Mirror of https://github.com/Swiftgram/Telegram-iOS.git (synced 2025-06-16 05:55:20 +00:00)

Commit dec40ef830: Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios
@@ -796,6 +796,10 @@ public extension CALayer {
     static func luminanceToAlpha() -> NSObject? {
         return makeLuminanceToAlphaFilter()
     }
+    
+    static func colorInvert() -> NSObject? {
+        return makeColorInvertFilter()
+    }
 }
 
 public extension CALayer {
@@ -24,10 +24,18 @@ typedef struct FFMpegStreamMetrics {
     int32_t extradataSize;
 } FFMpegStreamMetrics;
 
+typedef struct FFMpegAVIndexEntry {
+    int64_t pos;
+    int64_t timestamp;
+    bool isKeyframe;
+    int32_t size;
+} FFMpegAVIndexEntry;
+
 extern int FFMpegCodecIdH264;
 extern int FFMpegCodecIdHEVC;
 extern int FFMpegCodecIdMPEG4;
 extern int FFMpegCodecIdVP9;
+extern int FFMpegCodecIdVP8;
 extern int FFMpegCodecIdAV1;
 
 @class FFMpegAVCodecContext;
@@ -40,6 +48,7 @@ extern int FFMpegCodecIdAV1;
 - (bool)openInputWithDirectFilePath:(NSString * _Nullable)directFilePath;
 - (bool)findStreamInfo;
 - (void)seekFrameForStreamIndex:(int32_t)streamIndex pts:(int64_t)pts positionOnKeyframe:(bool)positionOnKeyframe;
+- (void)seekFrameForStreamIndex:(int32_t)streamIndex byteOffset:(int64_t)byteOffset;
 - (bool)readFrameIntoPacket:(FFMpegPacket *)packet;
 - (NSArray<NSNumber *> *)streamIndicesForType:(FFMpegAVFormatStreamType)type;
 - (bool)isAttachedPicAtStreamIndex:(int32_t)streamIndex;
@@ -47,6 +56,8 @@ extern int FFMpegCodecIdAV1;
 - (double)duration;
 - (int64_t)startTimeAtStreamIndex:(int32_t)streamIndex;
 - (int64_t)durationAtStreamIndex:(int32_t)streamIndex;
+- (int)numberOfIndexEntriesAtStreamIndex:(int32_t)streamIndex;
+- (bool)fillIndexEntryAtStreamIndex:(int32_t)streamIndex entryIndex:(int32_t)entryIndex outEntry:(FFMpegAVIndexEntry * _Nonnull)outEntry;
 - (bool)codecParamsAtStreamIndex:(int32_t)streamIndex toContext:(FFMpegAVCodecContext *)context;
 - (FFMpegFpsAndTimebase)fpsAndTimebaseForStreamIndex:(int32_t)streamIndex defaultTimeBase:(CMTime)defaultTimeBase;
 - (FFMpegStreamMetrics)metricsForStreamAtIndex:(int32_t)streamIndex;
@@ -12,6 +12,7 @@ NS_ASSUME_NONNULL_BEGIN
 @property (nonatomic, readonly) int32_t streamIndex;
 @property (nonatomic, readonly) int32_t size;
 @property (nonatomic, readonly) uint8_t *data;
+@property (nonatomic, readonly) bool isKeyframe;
 
 - (void *)impl;
 - (int32_t)sendToDecoder:(FFMpegAVCodecContext *)codecContext;
@@ -11,6 +11,7 @@ int FFMpegCodecIdH264 = AV_CODEC_ID_H264;
 int FFMpegCodecIdHEVC = AV_CODEC_ID_HEVC;
 int FFMpegCodecIdMPEG4 = AV_CODEC_ID_MPEG4;
 int FFMpegCodecIdVP9 = AV_CODEC_ID_VP9;
+int FFMpegCodecIdVP8 = AV_CODEC_ID_VP8;
 int FFMpegCodecIdAV1 = AV_CODEC_ID_AV1;
 
 @interface FFMpegAVFormatContext () {
@@ -70,6 +71,11 @@ int FFMpegCodecIdAV1 = AV_CODEC_ID_AV1;
     av_seek_frame(_impl, streamIndex, pts, options);
 }
 
+- (void)seekFrameForStreamIndex:(int32_t)streamIndex byteOffset:(int64_t)byteOffset {
+    int options = AVSEEK_FLAG_BYTE;
+    av_seek_frame(_impl, streamIndex, byteOffset, options);
+}
+
 - (bool)readFrameIntoPacket:(FFMpegPacket *)packet {
     int result = av_read_frame(_impl, (AVPacket *)[packet impl]);
     return result >= 0;
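AVSEEK_FLAG_BYTE makes av_seek_frame treat the target as an absolute byte position in the input rather than a timestamp, which is what lets the segment scanner added later in this diff jump straight to a keyframe's recorded file offset. A minimal sketch of the call from the Swift side (the context, stream index, and offset are placeholders; the bridged method name is the one added above):

// Position demuxing at a byte offset previously recorded from the stream index.
// No pts-to-byte conversion happens here; the offset must already be valid.
formatContext.seekFrame(forStreamIndex: videoStreamIndex, byteOffset: keyframeByteOffset)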
@@ -117,6 +123,28 @@ int FFMpegCodecIdAV1 = AV_CODEC_ID_AV1;
     return _impl->streams[streamIndex]->duration;
 }
 
+- (int)numberOfIndexEntriesAtStreamIndex:(int32_t)streamIndex {
+    return avformat_index_get_entries_count(_impl->streams[streamIndex]);
+}
+
+- (bool)fillIndexEntryAtStreamIndex:(int32_t)streamIndex entryIndex:(int32_t)entryIndex outEntry:(FFMpegAVIndexEntry * _Nonnull)outEntry {
+    const AVIndexEntry *entry = avformat_index_get_entry(_impl->streams[streamIndex], entryIndex);
+    if (!entry) {
+        outEntry->pos = -1;
+        outEntry->timestamp = 0;
+        outEntry->isKeyframe = false;
+        outEntry->size = 0;
+        return false;
+    }
+    
+    outEntry->pos = entry->pos;
+    outEntry->timestamp = entry->timestamp;
+    outEntry->isKeyframe = (entry->flags & AVINDEX_KEYFRAME) != 0;
+    outEntry->size = entry->size;
+    
+    return true;
+}
+
 - (bool)codecParamsAtStreamIndex:(int32_t)streamIndex toContext:(FFMpegAVCodecContext *)context {
     int result = avcodec_parameters_to_context((AVCodecContext *)[context impl], _impl->streams[streamIndex]->codecpar);
     return result >= 0;
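Together with the header declarations above, this gives Swift callers random access to the demuxer's index without reading any packets. A minimal enumeration sketch, assuming an already-opened FFMpegAVFormatContext named formatContext and a valid streamIndex (both placeholders; the bridged method names are the ones added in this diff):

// Collect the byte offsets of all keyframes recorded in the container index.
var keyframeOffsets: [Int64] = []
var entry = FFMpegAVIndexEntry()
let entryCount = formatContext.numberOfIndexEntries(atStreamIndex: streamIndex)
for i in 0 ..< entryCount {
    // fillIndexEntry returns false (and resets entry) when the slot is unavailable.
    if !formatContext.fillIndexEntry(atStreamIndex: streamIndex, entryIndex: Int32(i), outEntry: &entry) {
        continue
    }
    if entry.isKeyframe {
        keyframeOffsets.append(entry.pos)
    }
}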
@@ -53,6 +53,10 @@
     return (int32_t)_impl->size;
 }
 
+- (bool)isKeyframe {
+    return (_impl->flags & AV_PKT_FLAG_KEY) != 0;
+}
+
 - (uint8_t *)data {
     return _impl->data;
 }
@@ -10,7 +10,7 @@ public func convertOpusToAAC(sourcePath: String, allocateTempFile: @escaping ()
     
     queue.async {
         do {
-            let audioSource = SoftwareAudioSource(path: sourcePath)
+            let audioSource = SoftwareAudioSource(path: sourcePath, focusedPart: nil)
             
             let outputPath = allocateTempFile()
             
(File diff suppressed because it is too large.)
@@ -0,0 +1,486 @@
+import Foundation
+import UIKit
+import SwiftSignalKit
+import Postbox
+import TelegramCore
+import FFMpegBinding
+import RangeSet
+
+private final class FFMpegMediaFrameExtractContext {
+    let fd: Int32
+    var readPosition: Int = 0
+    let size: Int
+    
+    var accessedRanges = RangeSet<Int>()
+    var maskRanges: RangeSet<Int>?
+    var recordAccessedRanges = false
+    
+    init(fd: Int32, size: Int) {
+        self.fd = fd
+        self.size = size
+    }
+}
+
+private func FFMpegMediaFrameExtractContextReadPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
+    let context = Unmanaged<FFMpegMediaFrameExtractContext>.fromOpaque(userData!).takeUnretainedValue()
+    if context.recordAccessedRanges {
+        context.accessedRanges.insert(contentsOf: context.readPosition ..< (context.readPosition + Int(bufferSize)))
+    }
+    
+    let result: Int
+    if let maskRanges = context.maskRanges {
+        let readRange = context.readPosition ..< (context.readPosition + Int(bufferSize))
+        let _ = maskRanges
+        let _ = readRange
+        result = read(context.fd, buffer, Int(bufferSize))
+    } else {
+        result = read(context.fd, buffer, Int(bufferSize))
+    }
+    context.readPosition += Int(bufferSize)
+    if result == 0 {
+        return FFMPEG_CONSTANT_AVERROR_EOF
+    }
+    return Int32(result)
+}
+
+private func FFMpegMediaFrameExtractContextSeekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
+    let context = Unmanaged<FFMpegMediaFrameExtractContext>.fromOpaque(userData!).takeUnretainedValue()
+    if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
+        return Int64(context.size)
+    } else {
+        context.readPosition = Int(offset)
+        lseek(context.fd, off_t(offset), SEEK_SET)
+        return offset
+    }
+}
+
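The side channel in the read callback is the interesting part: while recordAccessedRanges is set, every byte range FFmpeg touches is merged into accessedRanges, so after opening and probing the container the context knows exactly which ranges constitute the header. A toy illustration of the RangeSet bookkeeping (standalone; the byte values are invented):

import RangeSet

var accessed = RangeSet<Int>()
accessed.insert(contentsOf: 0 ..< 4096)       // e.g. a header probe
accessed.insert(contentsOf: 32768 ..< 36864)  // e.g. an index read near the tail
accessed.insert(contentsOf: 4096 ..< 8192)    // overlapping/adjacent ranges coalesce
// accessed.ranges now holds two ranges: 0..<8192 and 32768..<36864.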
+private struct FFMpegFrameSegment {
+    struct Stream {
+        let index: Int
+        let startPts: CMTime
+        let startPosition: Int64
+        var endPts: CMTime
+        var endPosition: Int64
+        var duration: Double
+    }
+    
+    var audio: Stream?
+    var video: Stream?
+    
+    init() {
+    }
+    
+    mutating func addFrame(isVideo: Bool, index: Int, pts: CMTime, duration: Double, position: Int64, size: Int64) {
+        if var stream = isVideo ? self.video : self.audio {
+            stream.endPts = pts
+            stream.duration += duration
+            stream.endPosition = max(stream.endPosition, position + size)
+            if isVideo {
+                self.video = stream
+            } else {
+                self.audio = stream
+            }
+        } else {
+            let stream = Stream(index: index, startPts: pts, startPosition: position, endPts: pts, endPosition: position + size, duration: duration)
+            if isVideo {
+                self.video = stream
+            } else {
+                self.audio = stream
+            }
+        }
+    }
+}
+
+private final class FFMpegFrameSegmentInfo {
+    let headerAccessRanges: RangeSet<Int>
+    let segments: [FFMpegFrameSegment]
+    
+    init(headerAccessRanges: RangeSet<Int>, segments: [FFMpegFrameSegment]) {
+        self.headerAccessRanges = headerAccessRanges
+        self.segments = segments
+    }
+}
+
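FFMpegFrameSegment tracks, per stream, the pts window and the byte window covered by the frames appended so far: the first addFrame for a stream pins startPts/startPosition, later calls extend endPts/endPosition and accumulate duration. A small worked sketch (standalone illustration of the file-private type above; the timescale and byte positions are invented):

import CoreMedia

var segment = FFMpegFrameSegment()
// Keyframe at t = 0.0s, 40ms long, occupying bytes 1000 ..< 5000.
segment.addFrame(isVideo: true, index: 0, pts: CMTimeMake(value: 0, timescale: 1000), duration: 0.04, position: 1000, size: 4000)
// Next frame at t = 0.04s, occupying bytes 5000 ..< 6200.
segment.addFrame(isVideo: true, index: 0, pts: CMTimeMake(value: 40, timescale: 1000), duration: 0.04, position: 5000, size: 1200)
// segment.video now spans pts 0.0 ... 0.04, bytes 1000 ..< 6200,
// with an accumulated duration of 0.08s.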
+private func extractFFMpegFrameSegmentInfo(path: String) -> FFMpegFrameSegmentInfo? {
+    let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals
+    
+    var s = stat()
+    stat(path, &s)
+    let size = Int32(s.st_size)
+    
+    let fd = open(path, O_RDONLY, S_IRUSR)
+    if fd < 0 {
+        return nil
+    }
+    defer {
+        close(fd)
+    }
+    
+    let avFormatContext = FFMpegAVFormatContext()
+    let ioBufferSize = 32 * 1024
+    
+    let context = FFMpegMediaFrameExtractContext(fd: fd, size: Int(size))
+    context.recordAccessedRanges = true
+    
+    guard let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(context).toOpaque(), readPacket: FFMpegMediaFrameExtractContextReadPacketCallback, writePacket: nil, seek: FFMpegMediaFrameExtractContextSeekCallback, isSeekable: true) else {
+        return nil
+    }
+    
+    avFormatContext.setIO(avIoContext)
+    
+    if !avFormatContext.openInput(withDirectFilePath: nil) {
+        return nil
+    }
+    
+    if !avFormatContext.findStreamInfo() {
+        return nil
+    }
+    
+    var audioStream: FFMpegMediaInfo.Info?
+    var videoStream: FFMpegMediaInfo.Info?
+    
+    for typeIndex in 0 ..< 2 {
+        let isVideo = typeIndex == 0
+        
+        for streamIndexNumber in avFormatContext.streamIndices(for: isVideo ? FFMpegAVFormatStreamTypeVideo : FFMpegAVFormatStreamTypeAudio) {
+            let streamIndex = streamIndexNumber.int32Value
+            if avFormatContext.isAttachedPic(atStreamIndex: streamIndex) {
+                continue
+            }
+            
+            let fpsAndTimebase = avFormatContext.fpsAndTimebase(forStreamIndex: streamIndex, defaultTimeBase: CMTimeMake(value: 1, timescale: 40000))
+            let (fps, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)
+            
+            let startTime: CMTime
+            let rawStartTime = avFormatContext.startTime(atStreamIndex: streamIndex)
+            if rawStartTime == Int64(bitPattern: 0x8000000000000000 as UInt64) {
+                startTime = CMTime(value: 0, timescale: timebase.timescale)
+            } else {
+                startTime = CMTimeMake(value: rawStartTime, timescale: timebase.timescale)
+            }
+            var duration = CMTimeMake(value: avFormatContext.duration(atStreamIndex: streamIndex), timescale: timebase.timescale)
+            duration = CMTimeMaximum(CMTime(value: 0, timescale: duration.timescale), CMTimeSubtract(duration, startTime))
+            
+            var codecName: String?
+            let codecId = avFormatContext.codecId(atStreamIndex: streamIndex)
+            if codecId == FFMpegCodecIdMPEG4 {
+                codecName = "mpeg4"
+            } else if codecId == FFMpegCodecIdH264 {
+                codecName = "h264"
+            } else if codecId == FFMpegCodecIdHEVC {
+                codecName = "hevc"
+            } else if codecId == FFMpegCodecIdAV1 {
+                codecName = "av1"
+            } else if codecId == FFMpegCodecIdVP9 {
+                codecName = "vp9"
+            } else if codecId == FFMpegCodecIdVP8 {
+                codecName = "vp8"
+            }
+            
+            let info = FFMpegMediaInfo.Info(
+                index: Int(streamIndex),
+                timescale: timebase.timescale,
+                startTime: startTime,
+                duration: duration,
+                fps: fps,
+                codecName: codecName
+            )
+            
+            if isVideo {
+                videoStream = info
+            } else {
+                audioStream = info
+            }
+        }
+    }
+    
+    var segments: [FFMpegFrameSegment] = []
+    let maxSegmentDuration: Double = 5.0
+    
+    if let videoStream {
+        let indexEntryCount = avFormatContext.numberOfIndexEntries(atStreamIndex: Int32(videoStream.index))
+        
+        if indexEntryCount > 0 {
+            let frameDuration = 1.0 / videoStream.fps.seconds
+            
+            var indexEntry = FFMpegAVIndexEntry()
+            for i in 0 ..< indexEntryCount {
+                if !avFormatContext.fillIndexEntry(atStreamIndex: Int32(videoStream.index), entryIndex: Int32(i), outEntry: &indexEntry) {
+                    continue
+                }
+                
+                let packetPts = CMTime(value: indexEntry.timestamp, timescale: videoStream.timescale)
+                //print("index: \(packetPts.seconds), isKeyframe: \(indexEntry.isKeyframe), position: \(indexEntry.pos), size: \(indexEntry.size)")
+                
+                var startNewSegment = segments.isEmpty
+                if indexEntry.isKeyframe {
+                    if segments.isEmpty {
+                        startNewSegment = true
+                    } else if let video = segments[segments.count - 1].video {
+                        if packetPts.seconds - video.startPts.seconds > maxSegmentDuration {
+                            startNewSegment = true
+                        }
+                    }
+                }
+                
+                if startNewSegment {
+                    segments.append(FFMpegFrameSegment())
+                }
+                segments[segments.count - 1].addFrame(isVideo: true, index: videoStream.index, pts: packetPts, duration: frameDuration, position: indexEntry.pos, size: Int64(indexEntry.size))
+            }
+            if !segments.isEmpty, let video = segments[segments.count - 1].video {
+                if video.endPts.seconds + 1.0 / videoStream.fps.seconds + 0.001 < videoStream.duration.seconds {
+                    segments[segments.count - 1].video?.duration = videoStream.duration.seconds - video.startPts.seconds
+                    segments[segments.count - 1].video?.endPts = videoStream.duration
+                }
+            }
+        }
+    }
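The segmentation rule above is: a new segment may only start at a keyframe, and only once the current segment's video already spans more than maxSegmentDuration (5.0s). For example, with keyframes at t = 0, 2, 4, 6, 8, ... the first segment collects frames from 0 up to (but not including) the keyframe at t = 6, since that is the first keyframe where pts - startPts exceeds 5; the next segment then runs 6 ..< 12, and so on. The trailing fix-up extends the last segment's endPts to the stream duration when the index undershoots it by more than one frame.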
+    if let audioStream {
+        let indexEntryCount = avFormatContext.numberOfIndexEntries(atStreamIndex: Int32(audioStream.index))
+        if indexEntryCount > 0 {
+            var minSegmentIndex = 0
+            var minSegmentStartTime: Double = -100000.0
+            
+            let frameDuration = 1.0 / audioStream.fps.seconds
+            
+            var indexEntry = FFMpegAVIndexEntry()
+            for i in 0 ..< indexEntryCount {
+                if !avFormatContext.fillIndexEntry(atStreamIndex: Int32(audioStream.index), entryIndex: Int32(i), outEntry: &indexEntry) {
+                    continue
+                }
+                
+                let packetPts = CMTime(value: indexEntry.timestamp, timescale: audioStream.timescale)
+                //print("index: \(packetPts.value), timestamp: \(packetPts.seconds), isKeyframe: \(indexEntry.isKeyframe), position: \(indexEntry.pos), size: \(indexEntry.size)")
+                
+                if videoStream != nil {
+                    for i in minSegmentIndex ..< segments.count {
+                        if let video = segments[i].video {
+                            if minSegmentStartTime <= packetPts.seconds && video.endPts.seconds >= packetPts.seconds {
+                                segments[i].addFrame(isVideo: false, index: audioStream.index, pts: packetPts, duration: frameDuration, position: indexEntry.pos, size: Int64(indexEntry.size))
+                                if minSegmentIndex != i {
+                                    minSegmentIndex = i
+                                    minSegmentStartTime = video.startPts.seconds
+                                }
+                                break
+                            }
+                        }
+                    }
+                } else {
+                    if segments.isEmpty {
+                        segments.append(FFMpegFrameSegment())
+                    }
+                    segments[segments.count - 1].addFrame(isVideo: false, index: audioStream.index, pts: packetPts, duration: frameDuration, position: indexEntry.pos, size: Int64(indexEntry.size))
+                }
+            }
+        }
+        if !segments.isEmpty, let audio = segments[segments.count - 1].audio {
+            if audio.endPts.seconds + 0.001 < audioStream.duration.seconds {
+                segments[segments.count - 1].audio?.duration = audioStream.duration.seconds - audio.startPts.seconds
+                segments[segments.count - 1].audio?.endPts = audioStream.duration
+            }
+        }
+    }
+    
+    let headerAccessRanges = context.accessedRanges
+    
+    for i in 1 ..< segments.count {
+        let segment = segments[i]
+        
+        if let video = segment.video {
+            context.maskRanges = headerAccessRanges
+            context.maskRanges?.insert(contentsOf: Int(video.startPosition) ..< Int(video.endPosition))
+            
+            context.accessedRanges = RangeSet()
+            context.recordAccessedRanges = true
+            
+            avFormatContext.seekFrame(forStreamIndex: Int32(video.index), byteOffset: video.startPosition)
+            
+            let packet = FFMpegPacket()
+            while true {
+                if !avFormatContext.readFrame(into: packet) {
+                    break
+                }
+                
+                if Int(packet.streamIndex) == video.index {
+                    let packetPts = CMTime(value: packet.pts, timescale: video.startPts.timescale)
+                    if packetPts.value >= video.endPts.value {
+                        break
+                    }
+                }
+            }
+            
+            print("Segment \(i): \(video.startPosition) ..< \(video.endPosition) accessed \(context.accessedRanges.ranges)")
+        }
+    }
+    
+    /*{
+        if let videoStream {
+            avFormatContext.seekFrame(forStreamIndex: Int32(videoStream.index), pts: 0, positionOnKeyframe: true)
+            
+            let packet = FFMpegPacket()
+            while true {
+                if !avFormatContext.readFrame(into: packet) {
+                    break
+                }
+                
+                if Int(packet.streamIndex) == videoStream.index {
+                    let packetPts = CMTime(value: packet.pts, timescale: videoStream.timescale)
+                    let packetDuration = CMTime(value: packet.duration, timescale: videoStream.timescale)
+                    
+                    var startNewSegment = segments.isEmpty
+                    if packet.isKeyframe {
+                        if segments.isEmpty {
+                            startNewSegment = true
+                        } else if let video = segments[segments.count - 1].video {
+                            if packetPts.seconds - video.startPts.seconds > maxSegmentDuration {
+                                startNewSegment = true
+                            }
+                        }
+                    }
+                    
+                    if startNewSegment {
+                        segments.append(FFMpegFrameSegment())
+                    }
+                    segments[segments.count - 1].addFrame(isVideo: true, index: Int(packet.streamIndex), pts: packetPts, duration: packetDuration.seconds)
+                }
+            }
+        }
+        if let audioStream {
+            avFormatContext.seekFrame(forStreamIndex: Int32(audioStream.index), pts: 0, positionOnKeyframe: true)
+            
+            var minSegmentIndex = 0
+            
+            let packet = FFMpegPacket()
+            while true {
+                if !avFormatContext.readFrame(into: packet) {
+                    break
+                }
+                
+                if Int(packet.streamIndex) == audioStream.index {
+                    let packetPts = CMTime(value: packet.pts, timescale: audioStream.timescale)
+                    let packetDuration = CMTime(value: packet.duration, timescale: audioStream.timescale)
+                    
+                    if videoStream != nil {
+                        for i in minSegmentIndex ..< segments.count {
+                            if let video = segments[i].video {
+                                if video.startPts.seconds <= packetPts.seconds && video.endPts.seconds >= packetPts.seconds {
+                                    segments[i].addFrame(isVideo: false, index: Int(audioStream.index), pts: packetPts, duration: packetDuration.seconds)
+                                    minSegmentIndex = i
+                                    break
+                                }
+                            }
+                        }
+                    } else {
+                        if segments.isEmpty {
+                            segments.append(FFMpegFrameSegment())
+                        }
+                        segments[segments.count - 1].addFrame(isVideo: false, index: Int(packet.streamIndex), pts: packetPts, duration: packetDuration.seconds)
+                    }
+                }
+            }
+        }
+    }*/
+    
+    /*for i in 0 ..< segments.count {
+        print("Segment \(i):\n  video \(segments[i].video?.startPts.seconds ?? -1.0) ... \(segments[i].video?.endPts.seconds ?? -1.0)\n  audio \(segments[i].audio?.startPts.seconds ?? -1.0) ... \(segments[i].audio?.endPts.seconds ?? -1.0)")
+    }*/
+    
+    return FFMpegFrameSegmentInfo(
+        headerAccessRanges: context.accessedRanges,
+        segments: segments
+    )
+}
+
+final class ChunkMediaPlayerDirectFetchSourceImpl: ChunkMediaPlayerSourceImpl {
+    private let resource: ChunkMediaPlayerV2.SourceDescription.ResourceDescription
+    
+    private let partsStateValue = Promise<ChunkMediaPlayerPartsState>()
+    var partsState: Signal<ChunkMediaPlayerPartsState, NoError> {
+        return self.partsStateValue.get()
+    }
+    
+    private var completeFetchDisposable: Disposable?
+    private var dataDisposable: Disposable?
+    
+    init(resource: ChunkMediaPlayerV2.SourceDescription.ResourceDescription) {
+        self.resource = resource
+        
+        if resource.fetchAutomatically {
+            self.completeFetchDisposable = fetchedMediaResource(
+                mediaBox: resource.postbox.mediaBox,
+                userLocation: resource.userLocation,
+                userContentType: resource.userContentType,
+                reference: resource.reference,
+                statsCategory: resource.statsCategory,
+                preferBackgroundReferenceRevalidation: true
+            ).startStrict()
+        }
+        
+        self.dataDisposable = (resource.postbox.mediaBox.resourceData(resource.reference.resource)
+        |> deliverOnMainQueue).startStrict(next: { [weak self] data in
+            guard let self else {
+                return
+            }
+            if data.complete {
+                if let mediaInfo = extractFFMpegMediaInfo(path: data.path), let mainTrack = mediaInfo.audio ?? mediaInfo.video, let segmentInfo = extractFFMpegFrameSegmentInfo(path: data.path) {
+                    var parts: [ChunkMediaPlayerPart] = []
+                    for segment in segmentInfo.segments {
+                        guard let mainStream = segment.video ?? segment.audio else {
+                            assertionFailure()
+                            continue
+                        }
+                        parts.append(ChunkMediaPlayerPart(
+                            startTime: mainStream.startPts.seconds,
+                            endTime: mainStream.startPts.seconds + mainStream.duration,
+                            content: .directFile(ChunkMediaPlayerPart.Content.FFMpegDirectFile(
+                                path: data.path,
+                                audio: segment.audio.flatMap { stream in
+                                    return ChunkMediaPlayerPart.DirectStream(
+                                        index: stream.index,
+                                        startPts: stream.startPts,
+                                        endPts: stream.endPts,
+                                        duration: stream.duration
+                                    )
+                                },
+                                video: segment.video.flatMap { stream in
+                                    return ChunkMediaPlayerPart.DirectStream(
+                                        index: stream.index,
+                                        startPts: stream.startPts,
+                                        endPts: stream.endPts,
+                                        duration: stream.duration
+                                    )
+                                }
+                            )),
+                            codecName: mediaInfo.video?.codecName
+                        ))
+                    }
+                    
+                    self.partsStateValue.set(.single(ChunkMediaPlayerPartsState(
+                        duration: mainTrack.duration.seconds,
+                        parts: parts
+                    )))
+                } else {
+                    self.partsStateValue.set(.single(ChunkMediaPlayerPartsState(
+                        duration: nil,
+                        parts: []
+                    )))
+                }
+            } else {
+                self.partsStateValue.set(.single(ChunkMediaPlayerPartsState(
+                    duration: nil,
+                    parts: []
+                )))
+            }
+        })
+    }
+    
+    deinit {
+        self.completeFetchDisposable?.dispose()
+        self.dataDisposable?.dispose()
+    }
+    
+    func updatePlaybackState(position: Double, isPlaying: Bool) {
+    }
+}
@@ -11,11 +11,51 @@ public let internal_isHardwareAv1Supported: Bool = {
     return value
 }()
 
+protocol ChunkMediaPlayerSourceImpl: AnyObject {
+    var partsState: Signal<ChunkMediaPlayerPartsState, NoError> { get }
+    
+    func updatePlaybackState(position: Double, isPlaying: Bool)
+}
+
+private final class ChunkMediaPlayerExternalSourceImpl: ChunkMediaPlayerSourceImpl {
+    let partsState: Signal<ChunkMediaPlayerPartsState, NoError>
+    
+    init(partsState: Signal<ChunkMediaPlayerPartsState, NoError>) {
+        self.partsState = partsState
+    }
+    
+    func updatePlaybackState(position: Double, isPlaying: Bool) {
+    }
+}
+
 public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
+    public enum SourceDescription {
+        public final class ResourceDescription {
+            public let postbox: Postbox
+            public let reference: MediaResourceReference
+            public let userLocation: MediaResourceUserLocation
+            public let userContentType: MediaResourceUserContentType
+            public let statsCategory: MediaResourceStatsCategory
+            public let fetchAutomatically: Bool
+            
+            public init(postbox: Postbox, reference: MediaResourceReference, userLocation: MediaResourceUserLocation, userContentType: MediaResourceUserContentType, statsCategory: MediaResourceStatsCategory, fetchAutomatically: Bool) {
+                self.postbox = postbox
+                self.reference = reference
+                self.userLocation = userLocation
+                self.userContentType = userContentType
+                self.statsCategory = statsCategory
+                self.fetchAutomatically = fetchAutomatically
+            }
+        }
+        
+        case externalParts(Signal<ChunkMediaPlayerPartsState, NoError>)
+        case directFetch(ResourceDescription)
+    }
+    
     private final class LoadedPart {
         final class Media {
             let queue: Queue
-            let tempFile: TempBoxFile
+            let content: ChunkMediaPlayerPart.Content
             let mediaType: AVMediaType
             let codecName: String?
             
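With this change a caller picks the part-feeding strategy up front: .externalParts keeps the old behavior of pushing ChunkMediaPlayerPartsState values in from outside, while .directFetch hands the player a resource to fetch and segment itself. A hedged construction sketch (postbox and resourceReference are assumed to exist in the caller's context; the specific location, content-type, and stats-category cases are illustrative):

let source: ChunkMediaPlayerV2.SourceDescription = .directFetch(
    ChunkMediaPlayerV2.SourceDescription.ResourceDescription(
        postbox: postbox,
        reference: resourceReference,
        userLocation: .other,
        userContentType: .video,
        statsCategory: .video,
        fetchAutomatically: true
    )
)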
@@ -24,11 +64,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             var didBeginReading: Bool = false
             var isFinished: Bool = false
             
-            init(queue: Queue, tempFile: TempBoxFile, mediaType: AVMediaType, codecName: String?) {
+            init(queue: Queue, content: ChunkMediaPlayerPart.Content, mediaType: AVMediaType, codecName: String?) {
                 assert(queue.isCurrent())
                 
                 self.queue = queue
-                self.tempFile = tempFile
+                self.content = content
                 self.mediaType = mediaType
                 self.codecName = codecName
             }
@@ -39,10 +79,10 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             
             func load() {
                 let reader: MediaDataReader
-                if self.mediaType == .video && (self.codecName == "av1" || self.codecName == "av01") && internal_isHardwareAv1Supported {
-                    reader = AVAssetVideoDataReader(filePath: self.tempFile.path, isVideo: self.mediaType == .video)
+                if case let .tempFile(tempFile) = self.content, self.mediaType == .video, (self.codecName == "av1" || self.codecName == "av01"), internal_isHardwareAv1Supported {
+                    reader = AVAssetVideoDataReader(filePath: tempFile.file.path, isVideo: self.mediaType == .video)
                 } else {
-                    reader = FFMpegMediaDataReader(filePath: self.tempFile.path, isVideo: self.mediaType == .video, codecName: self.codecName)
+                    reader = FFMpegMediaDataReader(content: self.content, isVideo: self.mediaType == .video, codecName: self.codecName)
                 }
                 if self.mediaType == .video {
                     if reader.hasVideo {
@@ -91,12 +131,10 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
     private let renderSynchronizer: AVSampleBufferRenderSynchronizer
     private var videoRenderer: AVSampleBufferDisplayLayer
    private var audioRenderer: AVSampleBufferAudioRenderer?
-    private weak var videoNode: MediaPlayerNode?
     
     private var partsState = ChunkMediaPlayerPartsState(duration: nil, parts: [])
     private var loadedParts: [LoadedPart] = []
     private var loadedPartsMediaData: QueueLocalObject<LoadedPartsMediaData>
-    private var reportedDidEnqueueVideo: Bool = false
     private var hasSound: Bool = false
     
     private var statusValue: MediaPlayerStatus? {
@@ -115,7 +153,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
         return .never()
     }
     
-    public var actionAtEnd: ChunkMediaPlayerActionAtEnd = .stop
+    public var actionAtEnd: MediaPlayerActionAtEnd = .stop
    
     private var isPlaying: Bool = false
     private var baseRate: Double = 1.0
@@ -132,6 +170,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
     private var videoIsRequestingMediaData: Bool = false
     private var audioIsRequestingMediaData: Bool = false
     
+    private let source: ChunkMediaPlayerSourceImpl
     private var partsStateDisposable: Disposable?
     private var updateTimer: Foundation.Timer?
     
@@ -140,7 +179,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
     
     public init(
         audioSessionManager: ManagedAudioSession,
-        partsState: Signal<ChunkMediaPlayerPartsState, NoError>,
+        source: SourceDescription,
         video: Bool,
         playAutomatically: Bool = false,
         enableSound: Bool,
@@ -175,7 +214,13 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             assertionFailure()
         }
         self.videoRenderer = playerNode.videoLayer ?? AVSampleBufferDisplayLayer()
-        self.videoNode = playerNode
+        
+        switch source {
+        case let .externalParts(partsState):
+            self.source = ChunkMediaPlayerExternalSourceImpl(partsState: partsState)
+        case let .directFetch(resource):
+            self.source = ChunkMediaPlayerDirectFetchSourceImpl(resource: resource)
+        }
         
         self.updateTimer = Foundation.Timer.scheduledTimer(withTimeInterval: 1.0 / 60.0, repeats: true, block: { [weak self] _ in
             guard let self else {
@@ -184,7 +229,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             self.updateInternalState()
         })
         
-        self.partsStateDisposable = (partsState
+        self.partsStateDisposable = (self.source.partsState
         |> deliverOnMainQueue).startStrict(next: { [weak self] partsState in
             guard let self else {
                 return
@@ -291,6 +336,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
         }
         let timestampSeconds = timestamp.seconds
         
+        self.source.updatePlaybackState(
+            position: timestampSeconds,
+            isPlaying: self.isPlaying
+        )
+        
         var duration: Double = 0.0
         if let partsStateDuration = self.partsState.duration {
             duration = partsStateDuration
@@ -318,7 +368,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                     startTime: part.startTime,
                     clippedStartTime: partStartTime == part.startTime ? nil : partStartTime,
                     endTime: part.endTime,
-                    file: part.file,
+                    content: part.content,
                     codecName: part.codecName
                 ))
                 minStartTime = max(minStartTime, partEndTime)
@@ -340,7 +390,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                     startTime: part.startTime,
                     clippedStartTime: partStartTime == part.startTime ? nil : partStartTime,
                     endTime: part.endTime,
-                    file: part.file,
+                    content: part.content,
                     codecName: part.codecName
                 ))
                 minStartTime = max(minStartTime, partEndTime)
@@ -385,7 +435,12 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             for part in loadedParts {
                 if let loadedPart = loadedPartsMediaData.parts[part.part.id] {
                     if let audio = loadedPart.audio, audio.didBeginReading, !isSoundEnabled {
-                        let cleanAudio = LoadedPart.Media(queue: dataQueue, tempFile: part.part.file, mediaType: .audio, codecName: part.part.codecName)
+                        let cleanAudio = LoadedPart.Media(
+                            queue: dataQueue,
+                            content: part.part.content,
+                            mediaType: .audio,
+                            codecName: part.part.codecName
+                        )
                         cleanAudio.load()
                         
                         loadedPartsMediaData.parts[part.part.id] = LoadedPart.MediaData(
@@ -395,10 +450,20 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                         )
                     }
                 } else {
-                    let video = LoadedPart.Media(queue: dataQueue, tempFile: part.part.file, mediaType: .video, codecName: part.part.codecName)
+                    let video = LoadedPart.Media(
+                        queue: dataQueue,
+                        content: part.part.content,
+                        mediaType: .video,
+                        codecName: part.part.codecName
+                    )
                     video.load()
                     
-                    let audio = LoadedPart.Media(queue: dataQueue, tempFile: part.part.file, mediaType: .audio, codecName: part.part.codecName)
+                    let audio = LoadedPart.Media(
+                        queue: dataQueue,
+                        content: part.part.content,
+                        mediaType: .audio,
+                        codecName: part.part.codecName
+                    )
                     audio.load()
                     
                     loadedPartsMediaData.parts[part.part.id] = LoadedPart.MediaData(
@@ -680,8 +745,8 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             
             videoTarget.requestMediaDataWhenReady(on: self.dataQueue.queue, using: { [weak self] in
                 if let loadedPartsMediaData = loadedPartsMediaData.unsafeGet() {
-                    let fillResult = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: videoTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: true)
-                    if fillResult.isReadyForMoreData {
+                    let bufferIsReadyForMoreData = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: videoTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: true)
+                    if bufferIsReadyForMoreData {
                         videoTarget.stopRequestingMediaData()
                         Queue.mainQueue().async {
                             guard let self else {
@@ -698,12 +763,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
         if !self.audioIsRequestingMediaData, let audioRenderer = self.audioRenderer {
             self.audioIsRequestingMediaData = true
             let loadedPartsMediaData = self.loadedPartsMediaData
-            let reportedDidEnqueueVideo = self.reportedDidEnqueueVideo
             let audioTarget = audioRenderer
             audioTarget.requestMediaDataWhenReady(on: self.dataQueue.queue, using: { [weak self] in
                 if let loadedPartsMediaData = loadedPartsMediaData.unsafeGet() {
-                    let fillResult = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: audioTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: false)
-                    if fillResult.isReadyForMoreData {
+                    let bufferIsReadyForMoreData = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: audioTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: false)
+                    if bufferIsReadyForMoreData {
                         audioTarget.stopRequestingMediaData()
                         Queue.mainQueue().async {
                             guard let self else {
@@ -713,28 +777,13 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                                 self.updateInternalState()
                             }
                         }
-                        if fillResult.didEnqueue && !reportedDidEnqueueVideo {
-                            Queue.mainQueue().async {
-                                guard let self else {
-                                    return
-                                }
-                                self.reportedDidEnqueueVideo = true
-                                if #available(iOS 17.4, *) {
-                                } else {
-                                    if let videoNode = self.videoNode {
-                                        videoNode.notifyHasSentFramesToDisplay()
-                                    }
-                                }
-                            }
-                        }
                     }
                 }
             })
         }
     }
     
-    private static func fillRendererBuffer(bufferTarget: AVQueuedSampleBufferRendering, loadedPartsMediaData: LoadedPartsMediaData, isVideo: Bool) -> (isReadyForMoreData: Bool, didEnqueue: Bool) {
+    private static func fillRendererBuffer(bufferTarget: AVQueuedSampleBufferRendering, loadedPartsMediaData: LoadedPartsMediaData, isVideo: Bool) -> Bool {
         var bufferIsReadyForMoreData = true
-        var didEnqeue = false
         outer: while true {
             if !bufferTarget.isReadyForMoreMediaData {
                 bufferIsReadyForMoreData = false
@@ -774,7 +823,9 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                         continue outer
                     }
                 }
-                didEnqeue = true
+                /*if !isVideo {
+                    print("Enqueue audio \(CMSampleBufferGetPresentationTimeStamp(sampleBuffer).value) next: \(CMSampleBufferGetPresentationTimeStamp(sampleBuffer).value + 1024)")
+                }*/
                 bufferTarget.enqueue(sampleBuffer)
                 hasData = true
                 continue outer
@@ -787,7 +838,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             }
         }
         
-        return (bufferIsReadyForMoreData, didEnqeue)
+        return bufferIsReadyForMoreData
     }
 }
 
@@ -184,6 +184,10 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa
             }
             fetchedCount = Int32(fetchedData.count)
             context.readingOffset += Int64(fetchedCount)
+            
+            if fetchedCount == 0 {
+                return FFMPEG_CONSTANT_AVERROR_EOF
+            }
         }
         
         if context.closed {
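The added guard matters because, in modern FFmpeg, returning 0 from a custom AVIO read callback is no longer interpreted as a clean end of stream: zero returns were deprecated (around FFmpeg 4.4) and callbacks are expected to report AVERROR_EOF explicitly, so a zero-length fetch at the end of the resource has to be translated into the sentinel rather than passed through.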
@@ -19,6 +19,7 @@ public protocol MediaDataReader: AnyObject {
 }
 
 public final class FFMpegMediaDataReader: MediaDataReader {
+    private let content: ChunkMediaPlayerPart.Content
     private let isVideo: Bool
     private let videoSource: SoftwareVideoReader?
     private let audioSource: SoftwareAudioSource?
@@ -31,15 +32,42 @@ public final class FFMpegMediaDataReader: MediaDataReader {
         return self.audioSource != nil
     }
     
-    public init(filePath: String, isVideo: Bool, codecName: String?) {
+    public init(content: ChunkMediaPlayerPart.Content, isVideo: Bool, codecName: String?) {
+        self.content = content
         self.isVideo = isVideo
         
+        let filePath: String
+        var focusedPart: MediaStreamFocusedPart?
+        switch content {
+        case let .tempFile(tempFile):
+            filePath = tempFile.file.path
+        case let .directFile(directFile):
+            filePath = directFile.path
+            
+            let stream = isVideo ? directFile.video : directFile.audio
+            guard let stream else {
+                self.videoSource = nil
+                self.audioSource = nil
+                return
+            }
+            
+            focusedPart = MediaStreamFocusedPart(
+                seekStreamIndex: stream.index,
+                startPts: stream.startPts,
+                endPts: stream.endPts
+            )
+        }
+        
         if self.isVideo {
             var passthroughDecoder = true
             if (codecName == "av1" || codecName == "av01") && !internal_isHardwareAv1Supported {
                 passthroughDecoder = false
             }
-            let videoSource = SoftwareVideoReader(path: filePath, hintVP9: false, passthroughDecoder: passthroughDecoder)
+            if codecName == "vp9" || codecName == "vp8" {
+                passthroughDecoder = false
+            }
+            
+            let videoSource = SoftwareVideoReader(path: filePath, hintVP9: false, passthroughDecoder: passthroughDecoder, focusedPart: focusedPart)
             if videoSource.hasStream {
                 self.videoSource = videoSource
             } else {
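A reader constructed from .directFile content therefore decodes only the stream window described by its DirectStream; everything outside startPts ..< endPts is skipped via the focused-part seek and cutoff. A hedged construction sketch (the path and timing values are placeholders shaped like what ChunkMediaPlayerDirectFetchSourceImpl produces earlier in this diff):

import CoreMedia

let content: ChunkMediaPlayerPart.Content = .directFile(ChunkMediaPlayerPart.Content.FFMpegDirectFile(
    path: "/path/to/video.mp4",
    audio: nil,
    video: ChunkMediaPlayerPart.DirectStream(
        index: 0,
        startPts: CMTimeMake(value: 0, timescale: 30000),
        endPts: CMTimeMake(value: 150000, timescale: 30000), // a 5-second window
        duration: 5.0
    )
))
let reader = FFMpegMediaDataReader(content: content, isVideo: true, codecName: "h264")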
@@ -47,7 +75,7 @@ public final class FFMpegMediaDataReader: MediaDataReader {
             }
             self.audioSource = nil
         } else {
-            let audioSource = SoftwareAudioSource(path: filePath)
+            let audioSource = SoftwareAudioSource(path: filePath, focusedPart: focusedPart)
             if audioSource.hasStream {
                 self.audioSource = audioSource
             } else {
@@ -444,11 +444,4 @@ public final class MediaPlayerNode: ASDisplayNode {
         }
         self.updateVideoInHierarchy?(self.videoInHierarchy || self.canPlaybackWithoutHierarchy)
     }
-    
-    func notifyHasSentFramesToDisplay() {
-        if !self.didNotifyVideoLayerReadyForDisplay {
-            self.didNotifyVideoLayerReadyForDisplay = true
-            self.hasSentFramesToDisplay?()
-        }
-    }
 }
@@ -9,7 +9,7 @@ import CoreMedia
 import SwiftSignalKit
 import FFMpegBinding
 
-private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
+private func SoftwareVideoSource_readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
     let context = Unmanaged<SoftwareVideoSource>.fromOpaque(userData!).takeUnretainedValue()
     if let fd = context.fd {
         let result = read(fd, buffer, Int(bufferSize))
@@ -21,7 +21,7 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa
     return FFMPEG_CONSTANT_AVERROR_EOF
 }
 
-private func seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
+private func SoftwareVideoSource_seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
     let context = Unmanaged<SoftwareVideoSource>.fromOpaque(userData!).takeUnretainedValue()
     if let fd = context.fd {
         if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
@@ -102,7 +102,7 @@ public final class SoftwareVideoSource {
         }
         let ioBufferSize = 64 * 1024
         
-        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback, isSeekable: true)
+        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: SoftwareVideoSource_readPacketCallback, writePacket: nil, seek: SoftwareVideoSource_seekCallback, isSeekable: true)
         self.avIoContext = avIoContext
         
         avFormatContext.setIO(self.avIoContext!)
@@ -356,7 +356,33 @@ private final class SoftwareAudioStream {
     }
 }
 
+private func SoftwareAudioSource_readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
+    let context = Unmanaged<SoftwareAudioSource>.fromOpaque(userData!).takeUnretainedValue()
+    if let fd = context.fd {
+        let result = read(fd, buffer, Int(bufferSize))
+        if result == 0 {
+            return FFMPEG_CONSTANT_AVERROR_EOF
+        }
+        return Int32(result)
+    }
+    return FFMPEG_CONSTANT_AVERROR_EOF
+}
+
+private func SoftwareAudioSource_seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
+    let context = Unmanaged<SoftwareAudioSource>.fromOpaque(userData!).takeUnretainedValue()
+    if let fd = context.fd {
+        if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
+            return Int64(context.size)
+        } else {
+            lseek(fd, off_t(offset), SEEK_SET)
+            return offset
+        }
+    }
+    return 0
+}
+
 public final class SoftwareAudioSource {
+    private let focusedPart: MediaStreamFocusedPart?
     private var readingError = false
     private var audioStream: SoftwareAudioStream?
     private var avIoContext: FFMpegAVIOContext?
@@ -371,9 +397,11 @@ public final class SoftwareAudioSource {
         return self.audioStream != nil
     }
     
-    public init(path: String) {
+    public init(path: String, focusedPart: MediaStreamFocusedPart?) {
         let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals
         
+        self.focusedPart = focusedPart
+        
         var s = stat()
         stat(path, &s)
         self.size = Int32(s.st_size)
@@ -391,7 +419,7 @@ public final class SoftwareAudioSource {
         
         let ioBufferSize = 64 * 1024
         
-        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback, isSeekable: true)
+        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: SoftwareAudioSource_readPacketCallback, writePacket: nil, seek: SoftwareAudioSource_seekCallback, isSeekable: true)
         self.avIoContext = avIoContext
         
         avFormatContext.setIO(self.avIoContext!)
@@ -438,10 +466,14 @@ public final class SoftwareAudioSource {
         
         self.audioStream = audioStream
         
+        if let focusedPart = self.focusedPart {
+            avFormatContext.seekFrame(forStreamIndex: Int32(focusedPart.seekStreamIndex), pts: focusedPart.startPts.value, positionOnKeyframe: true)
+        } else {
         if let audioStream = self.audioStream {
             avFormatContext.seekFrame(forStreamIndex: Int32(audioStream.index), pts: 0, positionOnKeyframe: false)
         }
+        }
     }
     
     deinit {
         if let fd = self.fd {
@@ -462,15 +494,18 @@ public final class SoftwareAudioSource {
         }
     }
     
-    func readDecodableFrame() -> (MediaTrackDecodableFrame?, Bool) {
+    func readDecodableFrame() -> MediaTrackDecodableFrame? {
         var frames: [MediaTrackDecodableFrame] = []
-        var endOfStream = false
         
-        while !self.readingError && frames.isEmpty {
+        while !self.readingError && !self.hasReadToEnd && frames.isEmpty {
             if let packet = self.readPacketInternal() {
-                if let audioStream = audioStream, Int(packet.streamIndex) == audioStream.index {
+                if let audioStream = self.audioStream, Int(packet.streamIndex) == audioStream.index {
                     let packetPts = packet.pts
+                    
+                    if let focusedPart = self.focusedPart, packetPts >= focusedPart.endPts.value {
+                        self.hasReadToEnd = true
+                    }
+                    
                     let pts = CMTimeMake(value: packetPts, timescale: audioStream.timebase.timescale)
                     let dts = CMTimeMake(value: packet.dts, timescale: audioStream.timebase.timescale)
                     
@@ -487,21 +522,11 @@ public final class SoftwareAudioSource {
                     frames.append(frame)
                 }
             } else {
-                if endOfStream {
                 break
-                } else {
-                    if let _ = self.avFormatContext, let _ = self.audioStream {
-                        endOfStream = true
-                        break
-                    } else {
-                        endOfStream = true
-                        break
-                    }
-                }
             }
         }
         
-        return (frames.first, endOfStream)
+        return frames.first
     }
     
     public func readFrame() -> Data? {
@ -509,8 +534,7 @@ public final class SoftwareAudioSource {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
let (decodableFrame, _) = self.readDecodableFrame()
|
if let decodableFrame = self.readDecodableFrame() {
|
||||||
if let decodableFrame = decodableFrame {
|
|
||||||
return audioStream.decoder.decodeRaw(frame: decodableFrame)
|
return audioStream.decoder.decodeRaw(frame: decodableFrame)
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
@ -523,8 +547,7 @@ public final class SoftwareAudioSource {
|
|||||||
}
|
}
|
||||||
|
|
||||||
while true {
|
while true {
|
||||||
let (decodableFrame, _) = self.readDecodableFrame()
|
if let decodableFrame = self.readDecodableFrame() {
|
||||||
if let decodableFrame = decodableFrame {
|
|
||||||
if audioStream.decoder.send(frame: decodableFrame) {
|
if audioStream.decoder.send(frame: decodableFrame) {
|
||||||
if let result = audioStream.decoder.decode() {
|
if let result = audioStream.decoder.decode() {
|
||||||
return result.sampleBuffer
|
return result.sampleBuffer
|
||||||
@ -541,8 +564,7 @@ public final class SoftwareAudioSource {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
let (decodableFrame, _) = self.readDecodableFrame()
|
if let decodableFrame = self.readDecodableFrame() {
|
||||||
if let decodableFrame = decodableFrame {
|
|
||||||
return (decodableFrame.copyPacketData(), Int(decodableFrame.packet.duration))
|
return (decodableFrame.copyPacketData(), Int(decodableFrame.packet.duration))
|
||||||
} else {
|
} else {
|
||||||
return nil
|
return nil
|
||||||
@ -557,7 +579,45 @@ public final class SoftwareAudioSource {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
public struct MediaStreamFocusedPart {
|
||||||
|
public let seekStreamIndex: Int
|
||||||
|
public let startPts: CMTime
|
||||||
|
public let endPts: CMTime
|
||||||
|
|
||||||
|
public init(seekStreamIndex: Int, startPts: CMTime, endPts: CMTime) {
|
||||||
|
self.seekStreamIndex = seekStreamIndex
|
||||||
|
self.startPts = startPts
|
||||||
|
self.endPts = endPts
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
private func SoftwareVideoReader_readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: UnsafeMutablePointer<UInt8>?, bufferSize: Int32) -> Int32 {
|
||||||
|
let context = Unmanaged<SoftwareVideoReader>.fromOpaque(userData!).takeUnretainedValue()
|
||||||
|
if let fd = context.fd {
|
||||||
|
let result = read(fd, buffer, Int(bufferSize))
|
||||||
|
if result == 0 {
|
||||||
|
return FFMPEG_CONSTANT_AVERROR_EOF
|
||||||
|
}
|
||||||
|
return Int32(result)
|
||||||
|
}
|
||||||
|
return FFMPEG_CONSTANT_AVERROR_EOF
|
||||||
|
}
|
||||||
|
|
||||||
|
private func SoftwareVideoReader_seekCallback(userData: UnsafeMutableRawPointer?, offset: Int64, whence: Int32) -> Int64 {
|
||||||
|
let context = Unmanaged<SoftwareVideoReader>.fromOpaque(userData!).takeUnretainedValue()
|
||||||
|
if let fd = context.fd {
|
||||||
|
if (whence & FFMPEG_AVSEEK_SIZE) != 0 {
|
||||||
|
return Int64(context.size)
|
||||||
|
} else {
|
||||||
|
lseek(fd, off_t(offset), SEEK_SET)
|
||||||
|
return offset
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return 0
|
||||||
|
}
|
||||||
|
|
||||||
final class SoftwareVideoReader {
|
final class SoftwareVideoReader {
|
||||||
|
private let focusedPart: MediaStreamFocusedPart?
|
||||||
private var readingError = false
|
private var readingError = false
|
||||||
private var videoStream: SoftwareVideoStream?
|
private var videoStream: SoftwareVideoStream?
|
||||||
private var avIoContext: FFMpegAVIOContext?
|
private var avIoContext: FFMpegAVIOContext?
|
||||||
@ -576,9 +636,11 @@ final class SoftwareVideoReader {
|
|||||||
return self.videoStream != nil
|
return self.videoStream != nil
|
||||||
}
|
}
|
||||||
|
|
||||||
public init(path: String, hintVP9: Bool, passthroughDecoder: Bool = false) {
|
public init(path: String, hintVP9: Bool, passthroughDecoder: Bool = false, focusedPart: MediaStreamFocusedPart?) {
|
||||||
let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals
|
let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals
|
||||||
|
|
||||||
|
self.focusedPart = focusedPart
|
||||||
|
|
||||||
var s = stat()
|
var s = stat()
|
||||||
stat(path, &s)
|
stat(path, &s)
|
||||||
self.size = Int32(s.st_size)
|
self.size = Int32(s.st_size)
|
||||||
@ -598,7 +660,7 @@ final class SoftwareVideoReader {
|
|||||||
}
|
}
|
||||||
let ioBufferSize = 64 * 1024
|
let ioBufferSize = 64 * 1024
|
||||||
|
|
||||||
let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback, isSeekable: true)
|
let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: SoftwareVideoReader_readPacketCallback, writePacket: nil, seek: SoftwareVideoReader_seekCallback, isSeekable: true)
|
||||||
self.avIoContext = avIoContext
|
self.avIoContext = avIoContext
|
||||||
|
|
||||||
avFormatContext.setIO(self.avIoContext!)
|
avFormatContext.setIO(self.avIoContext!)
|
||||||
@ -675,10 +737,14 @@ final class SoftwareVideoReader {
|
|||||||
|
|
||||||
self.videoStream = videoStream
|
self.videoStream = videoStream
|
||||||
|
|
||||||
|
if let focusedPart = self.focusedPart {
|
||||||
|
avFormatContext.seekFrame(forStreamIndex: Int32(focusedPart.seekStreamIndex), pts: focusedPart.startPts.value, positionOnKeyframe: true)
|
||||||
|
} else {
|
||||||
if let videoStream = self.videoStream {
|
if let videoStream = self.videoStream {
|
||||||
avFormatContext.seekFrame(forStreamIndex: Int32(videoStream.index), pts: 0, positionOnKeyframe: true)
|
avFormatContext.seekFrame(forStreamIndex: Int32(videoStream.index), pts: 0, positionOnKeyframe: true)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
deinit {
|
deinit {
|
||||||
if let fd = self.fd {
|
if let fd = self.fd {
|
||||||
@ -709,6 +775,10 @@ final class SoftwareVideoReader {
|
|||||||
if let videoStream = self.videoStream, Int(packet.streamIndex) == videoStream.index {
|
if let videoStream = self.videoStream, Int(packet.streamIndex) == videoStream.index {
|
||||||
let packetPts = packet.pts
|
let packetPts = packet.pts
|
||||||
|
|
||||||
|
if let focusedPart = self.focusedPart, packetPts >= focusedPart.endPts.value {
|
||||||
|
self.hasReadToEnd = true
|
||||||
|
}
|
||||||
|
|
||||||
let pts = CMTimeMake(value: packetPts, timescale: videoStream.timebase.timescale)
|
let pts = CMTimeMake(value: packetPts, timescale: videoStream.timebase.timescale)
|
||||||
let dts = CMTimeMake(value: packet.dts, timescale: videoStream.timebase.timescale)
|
let dts = CMTimeMake(value: packet.dts, timescale: videoStream.timebase.timescale)
|
||||||
|
|
||||||
@ -784,8 +854,11 @@ final class SoftwareVideoReader {
|
|||||||
|
|
||||||
public final class FFMpegMediaInfo {
|
public final class FFMpegMediaInfo {
|
||||||
public struct Info {
|
public struct Info {
|
||||||
|
public let index: Int
|
||||||
|
public let timescale: CMTimeScale
|
||||||
public let startTime: CMTime
|
public let startTime: CMTime
|
||||||
public let duration: CMTime
|
public let duration: CMTime
|
||||||
|
public let fps: CMTime
|
||||||
public let codecName: String?
|
public let codecName: String?
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -863,7 +936,7 @@ public func extractFFMpegMediaInfo(path: String) -> FFMpegMediaInfo? {
|
|||||||
|
|
||||||
var streamInfos: [(isVideo: Bool, info: FFMpegMediaInfo.Info)] = []
|
var streamInfos: [(isVideo: Bool, info: FFMpegMediaInfo.Info)] = []
|
||||||
|
|
||||||
for typeIndex in 0 ..< 1 {
|
for typeIndex in 0 ..< 2 {
|
||||||
let isVideo = typeIndex == 0
|
let isVideo = typeIndex == 0
|
||||||
|
|
||||||
for streamIndexNumber in avFormatContext.streamIndices(for: isVideo ? FFMpegAVFormatStreamTypeVideo : FFMpegAVFormatStreamTypeAudio) {
|
for streamIndexNumber in avFormatContext.streamIndices(for: isVideo ? FFMpegAVFormatStreamTypeVideo : FFMpegAVFormatStreamTypeAudio) {
|
||||||
@ -873,7 +946,7 @@ public func extractFFMpegMediaInfo(path: String) -> FFMpegMediaInfo? {
|
|||||||
}
|
}
|
||||||
|
|
||||||
let fpsAndTimebase = avFormatContext.fpsAndTimebase(forStreamIndex: streamIndex, defaultTimeBase: CMTimeMake(value: 1, timescale: 40000))
|
let fpsAndTimebase = avFormatContext.fpsAndTimebase(forStreamIndex: streamIndex, defaultTimeBase: CMTimeMake(value: 1, timescale: 40000))
|
||||||
let (_, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)
|
let (fps, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)
|
||||||
|
|
||||||
let startTime: CMTime
|
let startTime: CMTime
|
||||||
let rawStartTime = avFormatContext.startTime(atStreamIndex: streamIndex)
|
let rawStartTime = avFormatContext.startTime(atStreamIndex: streamIndex)
|
||||||
@ -895,9 +968,20 @@ public func extractFFMpegMediaInfo(path: String) -> FFMpegMediaInfo? {
|
|||||||
codecName = "hevc"
|
codecName = "hevc"
|
||||||
} else if codecId == FFMpegCodecIdAV1 {
|
} else if codecId == FFMpegCodecIdAV1 {
|
||||||
codecName = "av1"
|
codecName = "av1"
|
||||||
|
} else if codecId == FFMpegCodecIdVP9 {
|
||||||
|
codecName = "vp9"
|
||||||
|
} else if codecId == FFMpegCodecIdVP8 {
|
||||||
|
codecName = "vp8"
|
||||||
}
|
}
|
||||||
|
|
||||||
streamInfos.append((isVideo: isVideo, info: FFMpegMediaInfo.Info(startTime: startTime, duration: duration, codecName: codecName)))
|
streamInfos.append((isVideo: isVideo, info: FFMpegMediaInfo.Info(
|
||||||
|
index: Int(streamIndex),
|
||||||
|
timescale: timebase.timescale,
|
||||||
|
startTime: startTime,
|
||||||
|
duration: duration,
|
||||||
|
fps: fps,
|
||||||
|
codecName: codecName
|
||||||
|
)))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
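Editor's note: the hunks above add a MediaStreamFocusedPart window to both SoftwareAudioSource and SoftwareVideoReader: the reader seeks to startPts on a keyframe and stops handing out frames once a packet reaches endPts (hasReadToEnd). A minimal usage sketch, under stated assumptions: the path is a placeholder, and audioStreamIndex/timescale are assumed to come from prior stream inspection (for example via the index/timescale fields added to FFMpegMediaInfo.Info above).

    // Hedged sketch: decode only a one-second window of one audio stream.
    let timescale: CMTimeScale = 44100 // assumed stream timescale
    let audioStreamIndex = 1           // assumed stream index
    let focusedPart = MediaStreamFocusedPart(
        seekStreamIndex: audioStreamIndex,
        startPts: CMTimeMake(value: 0, timescale: timescale),
        endPts: CMTimeMake(value: CMTimeValue(timescale), timescale: timescale) // 1s window
    )
    let source = SoftwareAudioSource(path: "/path/to/file.mp4", focusedPart: focusedPart)
    while let frameData = source.readFrame() {
        // readFrame() returns nil once a packet at or past endPts has been seen.
        handle(frameData) // placeholder consumer
    }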
@@ -70,6 +70,9 @@ private func readPacketCallback(userData: UnsafeMutableRawPointer?, buffer: Unsa
         }
         let fetchedCount = Int32(fetchedData.count)
         context.readingOffset += Int64(fetchedCount)
+        if fetchedCount == 0 {
+            return FFMPEG_CONSTANT_AVERROR_EOF
+        }
         return fetchedCount
     } else {
         return FFMPEG_CONSTANT_AVERROR_EOF
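Editor's note: the fetchedCount == 0 guard matters because FFmpeg custom-IO read callbacks are expected to report end of stream as AVERROR_EOF; newer FFmpeg versions deprecate a plain 0 return from read_packet, which can leave the demuxer retrying instead of terminating. The new SoftwareVideoReader_readPacketCallback above follows the same convention. A reduced sketch of that contract (helper name is illustrative):

    // Map a zero-length read to FFmpeg's EOF sentinel; positive values pass through.
    func normalizeReadResult(_ bytesRead: Int32) -> Int32 {
        return bytesRead == 0 ? FFMPEG_CONSTANT_AVERROR_EOF : bytesRead
    }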
@@ -40,7 +40,8 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
     private let smallCircleSize: CGFloat
     
     private let backgroundView: BlurredBackgroundView
-    private(set) var vibrancyEffectView: UIVisualEffectView?
+    private let backgroundTintView: UIView
+    let backgroundTintMaskContainer: UIView
     let vibrantExpandedContentContainer: UIView
     
     private let maskLayer: SimpleLayer
@@ -58,7 +59,10 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
         self.largeCircleSize = largeCircleSize
         self.smallCircleSize = smallCircleSize
         
-        self.backgroundView = BlurredBackgroundView(color: .clear, enableBlur: true)
+        self.backgroundView = BlurredBackgroundView(color: nil, enableBlur: true)
         
+        self.backgroundTintView = UIView()
+        self.backgroundTintMaskContainer = UIView()
+        
         self.maskLayer = SimpleLayer()
         self.backgroundClippingLayer = SimpleLayer()
@@ -86,6 +90,7 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
         }
         
         self.vibrantExpandedContentContainer = UIView()
+        self.backgroundTintMaskContainer.addSubview(self.vibrantExpandedContentContainer)
         
         super.init()
         
@@ -97,6 +102,10 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
         self.largeCircleShadowLayer.opacity = 0.0
         self.smallCircleShadowLayer.opacity = 0.0
         
+        self.backgroundView.addSubview(self.backgroundTintView)
+        
+        self.backgroundTintMaskContainer.backgroundColor = .white
+        
         self.view.addSubview(self.backgroundView)
         
         self.maskLayer.addSublayer(self.smallCircleLayer)
@@ -132,30 +141,23 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
         if self.theme !== theme {
             self.theme = theme
             
-            if theme.overallDarkAppearance && !forceDark {
-                if let vibrancyEffectView = self.vibrancyEffectView {
-                    self.vibrancyEffectView = nil
-                    vibrancyEffectView.removeFromSuperview()
-                }
-            } else {
-                if self.vibrancyEffectView == nil {
-                    let style: UIBlurEffect.Style
-                    if forceDark {
-                        style = .dark
-                    } else {
-                        style = .extraLight
-                    }
-                    let blurEffect = UIBlurEffect(style: style)
-                    let vibrancyEffect = UIVibrancyEffect(blurEffect: blurEffect)
-                    let vibrancyEffectView = UIVisualEffectView(effect: vibrancyEffect)
-                    self.vibrancyEffectView = vibrancyEffectView
-                    vibrancyEffectView.contentView.addSubview(self.vibrantExpandedContentContainer)
-                    self.backgroundView.addSubview(vibrancyEffectView)
-                }
+            if theme.overallDarkAppearance {
+                if let invertFilter = CALayer.colorInvert(), let filter = CALayer.luminanceToAlpha() {
+                    self.backgroundTintMaskContainer.layer.filters = [invertFilter, filter]
                 }
+                self.backgroundTintView.mask = self.backgroundTintMaskContainer
+                
-            self.backgroundView.updateColor(color: theme.contextMenu.backgroundColor, transition: .immediate)
-            //self.backgroundView.updateColor(color: UIColor(white: 1.0, alpha: 0.0), forceKeepBlur: true, transition: .immediate)
+                self.backgroundView.updateColor(color: theme.contextMenu.backgroundColor, forceKeepBlur: true, transition: .immediate)
+                self.backgroundTintView.backgroundColor = UIColor(white: 1.0, alpha: 0.5)
+            } else {
+                if let filter = CALayer.luminanceToAlpha() {
+                    self.backgroundTintMaskContainer.layer.filters = [filter]
+                }
+                self.backgroundTintView.mask = self.backgroundTintMaskContainer
+                
+                self.backgroundView.updateColor(color: .clear, forceKeepBlur: true, transition: .immediate)
+                self.backgroundTintView.backgroundColor = theme.contextMenu.backgroundColor
+            }
             
             let shadowColor = UIColor(white: 0.0, alpha: 0.4)
             
@@ -213,9 +215,8 @@ final class ReactionContextBackgroundNode: ASDisplayNode {
         transition.updateFrame(view: self.backgroundView, frame: contentBounds, beginWithCurrentState: true)
         self.backgroundView.update(size: contentBounds.size, transition: transition)
         
-        if let vibrancyEffectView = self.vibrancyEffectView {
-            transition.updateFrame(view: vibrancyEffectView, frame: CGRect(origin: CGPoint(x: 10.0, y: 10.0), size: contentBounds.size), beginWithCurrentState: true)
-        }
+        transition.updateFrame(view: self.backgroundTintView, frame: CGRect(origin: CGPoint(x: -contentBounds.minX, y: -contentBounds.minY), size: contentBounds.size))
+        transition.updateFrame(view: self.backgroundTintMaskContainer, frame: CGRect(origin: CGPoint(), size: contentBounds.size))
     }
     
     func animateIn() {
@@ -130,7 +130,7 @@ private final class ExpandItemView: UIView {
     
     override init(frame: CGRect) {
         self.tintView = UIView()
-        self.tintView.backgroundColor = .white
+        self.tintView.backgroundColor = .black
         
         self.arrowView = UIImageView()
         self.arrowView.image = generateTintedImage(image: UIImage(bundleImageName: "Chat/Context Menu/ReactionExpandArrow"), color: .white)
@@ -187,9 +187,9 @@ private final class TitleLabelView: UIView {
             return nil
         })
         
-        let tintBody = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .white)
-        let tintBold = MarkdownAttributeSet(font: Font.semibold(13.0), textColor: .white)
-        let tintLink = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .white, additionalAttributes: [TelegramTextAttributes.URL: true as NSNumber])
+        let tintBody = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .black)
+        let tintBold = MarkdownAttributeSet(font: Font.semibold(13.0), textColor: .black)
+        let tintLink = MarkdownAttributeSet(font: Font.regular(13.0), textColor: .black, additionalAttributes: [TelegramTextAttributes.URL: true as NSNumber])
         let tintAttributes = MarkdownAttributes(body: tintBody, bold: tintBold, link: tintLink, linkAttribute: { _ in
             return (TelegramTextAttributes.URL, "")
         })
@@ -1593,10 +1593,8 @@ public final class ReactionContextNode: ASDisplayNode, ASScrollViewDelegate {
             transition: transition
         )
         
-        if let vibrancyEffectView = self.backgroundNode.vibrancyEffectView {
-            if self.contentTintContainer.view.superview !== vibrancyEffectView.contentView {
-                vibrancyEffectView.contentView.addSubview(self.contentTintContainer.view)
-            }
+        if self.contentTintContainer.view.superview !== self.backgroundNode.backgroundTintMaskContainer {
+            self.backgroundNode.backgroundTintMaskContainer.addSubview(self.contentTintContainer.view)
         }
         
         if let animateInFromAnchorRect = animateInFromAnchorRect, !self.reduceMotion {
@@ -2431,7 +2429,7 @@ public final class ReactionContextNode: ASDisplayNode, ASScrollViewDelegate {
             chatPeerId: nil,
             peekBehavior: nil,
             customLayout: emojiContentLayout,
-            externalBackground: self.backgroundNode.vibrancyEffectView == nil ? nil : EmojiPagerContentComponent.ExternalBackground(
+            externalBackground: self.backgroundNode.backgroundTintMaskContainer.isHidden ? nil : EmojiPagerContentComponent.ExternalBackground(
                 effectContainerView: self.backgroundNode.vibrantExpandedContentContainer
             ),
             externalExpansionView: self.view,
@@ -417,7 +417,7 @@ public final class EmojiKeyboardItemLayer: MultiAnimationRenderTarget {
             let color = theme.chat.inputMediaPanel.panelContentVibrantOverlayColor
             
             iconLayer.contents = generateIcon(color: color)?.cgImage
-            tintIconLayer.contents = generateIcon(color: .white)?.cgImage
+            tintIconLayer.contents = generateIcon(color: .black)?.cgImage
             
             tintIconLayer.isHidden = !needsVibrancy
         }
@@ -1342,9 +1342,10 @@ public final class EmojiPagerContentComponent: Component {
         private var isSearchActivated: Bool = false
         
         private let backgroundView: BlurredBackgroundView
+        private let backgroundTintView: UIView
         private var fadingMaskLayer: FadingMaskLayer?
         private var vibrancyClippingView: UIView
-        private var vibrancyEffectView: UIVisualEffectView?
+        private var vibrancyEffectView: UIView?
         public private(set) var mirrorContentClippingView: UIView?
         private let mirrorContentScrollView: UIView
         private var warpView: WarpView?
@@ -1398,6 +1399,7 @@ public final class EmojiPagerContentComponent: Component {
         
         override init(frame: CGRect) {
             self.backgroundView = BlurredBackgroundView(color: nil)
+            self.backgroundTintView = UIView()
             
             if ProcessInfo.processInfo.processorCount > 4 {
                 self.shimmerHostView = PortalSourceView()
@@ -1423,6 +1425,7 @@ public final class EmojiPagerContentComponent: Component {
             
             super.init(frame: frame)
             
+            self.backgroundView.addSubview(self.backgroundTintView)
             self.addSubview(self.backgroundView)
             
             if let shimmerHostView = self.shimmerHostView {
@@ -1618,7 +1621,7 @@ public final class EmojiPagerContentComponent: Component {
             if let mirrorContentClippingView = self.mirrorContentClippingView {
                 mirrorContentClippingView.addSubview(self.mirrorContentScrollView)
             } else if let vibrancyEffectView = self.vibrancyEffectView {
-                vibrancyEffectView.contentView.addSubview(self.mirrorContentScrollView)
+                vibrancyEffectView.addSubview(self.mirrorContentScrollView)
             }
             
             mirrorContentWarpView.removeFromSuperview()
@@ -3172,7 +3175,7 @@ public final class EmojiPagerContentComponent: Component {
             }
             
             groupBorderLayer.strokeColor = borderColor.cgColor
-            groupBorderLayer.tintContainerLayer.strokeColor = UIColor.white.cgColor
+            groupBorderLayer.tintContainerLayer.strokeColor = UIColor.black.cgColor
             groupBorderLayer.lineWidth = 1.6
             groupBorderLayer.lineCap = .round
             groupBorderLayer.fillColor = nil
@@ -3584,7 +3587,7 @@ public final class EmojiPagerContentComponent: Component {
                 itemSelectionLayer.tintContainerLayer.backgroundColor = UIColor.clear.cgColor
             } else {
                 itemSelectionLayer.backgroundColor = keyboardChildEnvironment.theme.chat.inputMediaPanel.panelContentControlVibrantSelectionColor.cgColor
-                itemSelectionLayer.tintContainerLayer.backgroundColor = UIColor(white: 1.0, alpha: 0.2).cgColor
+                itemSelectionLayer.tintContainerLayer.backgroundColor = UIColor(white: 0.0, alpha: 0.2).cgColor
             }
         }
         
@@ -4009,15 +4012,15 @@ public final class EmojiPagerContentComponent: Component {
                 }
             } else {
                 if self.vibrancyEffectView == nil {
-                    let style: UIBlurEffect.Style
-                    style = .extraLight
-                    let blurEffect = UIBlurEffect(style: style)
-                    let vibrancyEffect = UIVibrancyEffect(blurEffect: blurEffect)
-                    let vibrancyEffectView = UIVisualEffectView(effect: vibrancyEffect)
+                    let vibrancyEffectView = UIView()
+                    vibrancyEffectView.backgroundColor = .white
+                    if let filter = CALayer.luminanceToAlpha() {
+                        vibrancyEffectView.layer.filters = [filter]
+                    }
                     self.vibrancyEffectView = vibrancyEffectView
-                    self.backgroundView.addSubview(vibrancyEffectView)
+                    self.backgroundTintView.mask = vibrancyEffectView
                     self.vibrancyClippingView.addSubview(self.mirrorContentScrollView)
-                    vibrancyEffectView.contentView.addSubview(self.vibrancyClippingView)
+                    vibrancyEffectView.addSubview(self.vibrancyClippingView)
                 }
             }
             
@@ -4046,7 +4049,11 @@ public final class EmojiPagerContentComponent: Component {
             if hideBackground {
                 backgroundColor = backgroundColor.withAlphaComponent(0.01)
             }
-            self.backgroundView.updateColor(color: backgroundColor, enableBlur: true, forceKeepBlur: false, transition: transition.containedViewLayoutTransition)
+            
+            self.backgroundTintView.backgroundColor = backgroundColor
+            transition.setFrame(view: self.backgroundTintView, frame: CGRect(origin: CGPoint(), size: backgroundFrame.size))
+            
+            self.backgroundView.updateColor(color: .clear, enableBlur: true, forceKeepBlur: true, transition: transition.containedViewLayoutTransition)
             transition.setFrame(view: self.backgroundView, frame: backgroundFrame)
             self.backgroundView.update(size: backgroundFrame.size, transition: transition.containedViewLayoutTransition)
             
@@ -4652,7 +4659,7 @@ public final class EmojiPagerContentComponent: Component {
                 if let mirrorContentClippingView = self.mirrorContentClippingView {
                     mirrorContentClippingView.addSubview(visibleEmptySearchResultsView.tintContainerView)
                 } else if let vibrancyEffectView = self.vibrancyEffectView {
-                    vibrancyEffectView.contentView.addSubview(visibleEmptySearchResultsView.tintContainerView)
+                    vibrancyEffectView.addSubview(visibleEmptySearchResultsView.tintContainerView)
                 }
             }
             let emptySearchResultsSize = CGSize(width: availableSize.width, height: availableSize.height - itemLayout.searchInsets.top - itemLayout.searchHeight)
@@ -389,7 +389,7 @@ public final class EmojiSearchHeaderView: UIView, UITextFieldDelegate {
             self.clearIconView.image = generateTintedImage(image: UIImage(bundleImageName: "Components/Search Bar/Clear"), color: .white)?.withRenderingMode(.alwaysTemplate)
             self.clearIconView.tintColor = useOpaqueTheme ? theme.chat.inputMediaPanel.panelContentOpaqueSearchOverlayColor : theme.chat.inputMediaPanel.panelContentVibrantSearchOverlayColor
             
-            self.clearIconTintView.image = generateTintedImage(image: UIImage(bundleImageName: "Components/Search Bar/Clear"), color: .white)
+            self.clearIconTintView.image = generateTintedImage(image: UIImage(bundleImageName: "Components/Search Bar/Clear"), color: .black)
         }
         
         self.params = params
@@ -402,13 +402,13 @@ public final class EmojiSearchHeaderView: UIView, UITextFieldDelegate {
         
         if theme.overallDarkAppearance && forceNeedsVibrancy {
             self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlVibrantSelectionColor.withMultipliedAlpha(0.3).cgColor
-            self.tintBackgroundLayer.backgroundColor = UIColor(white: 1.0, alpha: 0.2).cgColor
+            self.tintBackgroundLayer.backgroundColor = UIColor(white: 0.0, alpha: 0.2).cgColor
         } else if useOpaqueTheme {
             self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlOpaqueSelectionColor.cgColor
-            self.tintBackgroundLayer.backgroundColor = UIColor.white.cgColor
+            self.tintBackgroundLayer.backgroundColor = UIColor.black.cgColor
         } else {
             self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlVibrantSelectionColor.cgColor
-            self.tintBackgroundLayer.backgroundColor = UIColor(white: 1.0, alpha: 0.2).cgColor
+            self.tintBackgroundLayer.backgroundColor = UIColor(white: 0.0, alpha: 0.2).cgColor
         }
         
         self.backgroundLayer.cornerRadius = inputHeight * 0.5
@@ -436,7 +436,7 @@ public final class EmojiSearchHeaderView: UIView, UITextFieldDelegate {
             component: AnyComponent(Text(
                 text: strings.Common_Cancel,
                 font: Font.regular(17.0),
-                color: .white
+                color: .black
             )),
             environment: {},
             containerSize: CGSize(width: size.width - 32.0, height: 100.0)
@@ -514,7 +514,7 @@ final class EmojiSearchSearchBarComponent: Component {
                 containerSize: itemLayout.itemSize
             )
             
-            itemView.tintView.tintColor = .white
+            itemView.tintView.tintColor = .black
             
             if let view = itemView.view.view as? LottieComponent.View {
                 if view.superview == nil {
@@ -592,7 +592,7 @@ final class EmojiSearchSearchBarComponent: Component {
             let selectedItemCenter = itemLayout.frame(at: index).center
             let selectionSize = CGSize(width: 28.0, height: 28.0)
             self.selectedItemBackground.backgroundColor = selectedColor.cgColor
-            self.selectedItemTintBackground.backgroundColor = UIColor(white: 1.0, alpha: 0.15).cgColor
+            self.selectedItemTintBackground.backgroundColor = UIColor(white: 0.0, alpha: 0.15).cgColor
             self.selectedItemBackground.cornerRadius = selectionSize.height * 0.5
             self.selectedItemTintBackground.cornerRadius = selectionSize.height * 0.5
             
@@ -678,7 +678,7 @@ final class EmojiSearchSearchBarComponent: Component {
             component: AnyComponent(Text(
                 text: component.strings.Common_Search,
                 font: Font.regular(17.0),
-                color: .white
+                color: .black
             )),
             environment: {},
             containerSize: CGSize(width: availableSize.width - 32.0, height: 100.0)
@@ -443,7 +443,7 @@ final class EmojiSearchStatusComponent: Component {
             overlayColor = component.useOpaqueTheme ? component.theme.chat.inputMediaPanel.panelContentOpaqueSearchOverlayColor : component.theme.chat.inputMediaPanel.panelContentVibrantSearchOverlayColor
         }
         
-        let baseColor: UIColor = .white
+        let baseColor: UIColor = .black
        
         if self.contentView.tintColor != overlayColor {
             self.contentView.tintColor = overlayColor
@@ -68,7 +68,7 @@ final class EmptySearchResultsView: UIView {
         )
         let _ = self.titleTintLabel.update(
             transition: .immediate,
-            component: AnyComponent(Text(text: text, font: Font.regular(15.0), color: .white)),
+            component: AnyComponent(Text(text: text, font: Font.regular(15.0), color: .black)),
             environment: {},
             containerSize: CGSize(width: size.width, height: 100.0)
         )
@@ -88,7 +88,7 @@ final class GroupExpandActionButton: UIButton {
         } else {
             self.backgroundLayer.backgroundColor = theme.chat.inputMediaPanel.panelContentControlVibrantOverlayColor.cgColor
         }
-        self.tintContainerLayer.backgroundColor = UIColor.white.cgColor
+        self.tintContainerLayer.backgroundColor = UIColor.black.cgColor
         
         let textSize: CGSize
         if let currentTextLayout = self.currentTextLayout, currentTextLayout.string == title, currentTextLayout.color == color, currentTextLayout.constrainedWidth == textConstrainedWidth {
@@ -100,7 +100,7 @@ final class GroupHeaderActionButton: UIButton {
         }
         
         self.backgroundLayer.backgroundColor = backgroundColor.cgColor
-        self.tintBackgroundLayer.backgroundColor = UIColor.white.withAlphaComponent(0.2).cgColor
+        self.tintBackgroundLayer.backgroundColor = UIColor.black.withAlphaComponent(0.2).cgColor
        
         self.tintContainerLayer.isHidden = !needsVibrancy
        
@@ -110,7 +110,7 @@ final class GroupHeaderActionButton: UIButton {
         } else {
             let font: UIFont = compact ? Font.medium(11.0) : Font.semibold(15.0)
             let string = NSAttributedString(string: title.uppercased(), font: font, textColor: foregroundColor)
-            let tintString = NSAttributedString(string: title.uppercased(), font: font, textColor: .white)
+            let tintString = NSAttributedString(string: title.uppercased(), font: font, textColor: .black)
             let stringBounds = string.boundingRect(with: CGSize(width: textConstrainedWidth, height: 100.0), options: .usesLineFragmentOrigin, context: nil)
             textSize = CGSize(width: ceil(stringBounds.width), height: ceil(stringBounds.height))
             self.textLayer.contents = generateImage(textSize, opaque: false, scale: 0.0, rotatedContext: { size, context in
@@ -172,7 +172,7 @@ final class GroupHeaderLayer: UIView {
             clearSize = image.size
             clearIconLayer.contents = image.cgImage
         }
-        if updateImage, let image = PresentationResourcesChat.chatInputMediaPanelGridDismissImage(theme, color: .white) {
+        if updateImage, let image = PresentationResourcesChat.chatInputMediaPanelGridDismissImage(theme, color: .black) {
             tintClearIconLayer.contents = image.cgImage
         }
        
@@ -215,7 +215,7 @@ final class GroupHeaderLayer: UIView {
                 stringValue = title
             }
             let string = NSAttributedString(string: stringValue, font: font, textColor: color)
-            let whiteString = NSAttributedString(string: stringValue, font: font, textColor: .white)
+            let whiteString = NSAttributedString(string: stringValue, font: font, textColor: .black)
             let stringBounds = string.boundingRect(with: CGSize(width: textConstrainedWidth, height: 18.0), options: [.usesLineFragmentOrigin, .truncatesLastVisibleLine], context: nil)
             textSize = CGSize(width: ceil(stringBounds.width), height: ceil(stringBounds.height))
             self.textLayer.contents = generateImage(textSize, opaque: false, scale: 0.0, rotatedContext: { size, context in
@@ -231,7 +231,6 @@ final class GroupHeaderLayer: UIView {
                 context.clear(CGRect(origin: CGPoint(), size: size))
                 UIGraphicsPushContext(context)
                 
-                //whiteString.draw(in: stringBounds)
                 whiteString.draw(with: stringBounds, options: [.usesLineFragmentOrigin, .truncatesLastVisibleLine], context: nil)
                 
                 UIGraphicsPopContext()
@@ -287,7 +286,7 @@ final class GroupHeaderLayer: UIView {
                 self.tintBadgeLayer = tintBadgeLayer
                 self.tintContentLayer.addSublayer(tintBadgeLayer)
                 
-                if let image = generateBadgeImage(color: .white) {
+                if let image = generateBadgeImage(color: .black) {
                     tintBadgeLayer.contents = image.cgImage
                 }
             }
@@ -342,7 +341,7 @@ final class GroupHeaderLayer: UIView {
                 self.tintLockIconLayer = tintLockIconLayer
                 self.tintContentLayer.addSublayer(tintLockIconLayer)
             }
-            if let image = PresentationResourcesChat.chatEntityKeyboardLock(theme, color: .white) {
+            if let image = PresentationResourcesChat.chatEntityKeyboardLock(theme, color: .black) {
                 tintLockIconLayer.contents = image.cgImage
                 tintLockIconLayer.frame = lockIconLayer.frame
                 tintLockIconLayer.isHidden = !needsVibrancy
@@ -368,7 +367,7 @@ final class GroupHeaderLayer: UIView {
                 subtitleSize = currentSubtitleLayout.size
             } else {
                 let string = NSAttributedString(string: subtitle, font: Font.regular(15.0), textColor: subtitleColor)
-                let whiteString = NSAttributedString(string: subtitle, font: Font.regular(15.0), textColor: .white)
+                let whiteString = NSAttributedString(string: subtitle, font: Font.regular(15.0), textColor: .black)
                 let stringBounds = string.boundingRect(with: CGSize(width: textConstrainedWidth, height: 100.0), options: .usesLineFragmentOrigin, context: nil)
                 subtitleSize = CGSize(width: ceil(stringBounds.width), height: ceil(stringBounds.height))
                 updateSubtitleContents = generateImage(subtitleSize, opaque: false, scale: 0.0, rotatedContext: { size, context in
@@ -493,7 +492,7 @@ final class GroupHeaderLayer: UIView {
                 self.tintSeparatorLayer = tintSeparatorLayer
                 self.tintContentLayer.addSublayer(tintSeparatorLayer)
             }
-            tintSeparatorLayer.backgroundColor = UIColor.white.cgColor
+            tintSeparatorLayer.backgroundColor = UIColor.black.cgColor
             tintSeparatorLayer.frame = CGRect(origin: CGPoint(x: 0.0, y: 0.0), size: CGSize(width: size.width, height: UIScreenPixel))
             
             tintSeparatorLayer.isHidden = !needsVibrancy
@@ -27,8 +27,15 @@ public struct HLSCodecConfiguration {
 
 public extension HLSCodecConfiguration {
     init(context: AccountContext) {
-        var isHardwareAv1Supported = internal_isHardwareAv1Supported
-        var isSoftwareAv1Supported = false
+        /*var isSoftwareAv1Supported = false
+        var isHardwareAv1Supported = false
         
+        var length: Int = 4
+        var cpuCount: UInt32 = 0
+        sysctlbyname("hw.ncpu", &cpuCount, &length, nil, 0)
+        if cpuCount >= 6 {
+            isSoftwareAv1Supported = true
+        }
+        
         if let data = context.currentAppConfiguration.with({ $0 }).data, let value = data["ios_enable_hardware_av1"] as? Double {
             isHardwareAv1Supported = value != 0.0
@@ -37,7 +44,9 @@ public extension HLSCodecConfiguration {
             isSoftwareAv1Supported = value != 0.0
         }
         
-        self.init(isHardwareAv1Supported: isHardwareAv1Supported, isSoftwareAv1Supported: isSoftwareAv1Supported)
+        self.init(isHardwareAv1Supported: isHardwareAv1Supported, isSoftwareAv1Supported: isSoftwareAv1Supported)*/
+        
+        self.init(isHardwareAv1Supported: false, isSoftwareAv1Supported: false)
     }
 }
 
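Editor's note: after this hunk, HLSCodecConfiguration.init(context:) ignores both the device probe and the app-configuration flags and unconditionally reports AV1 as unsupported; the old logic is only parked inside the comment. For reference, a standalone sketch of the commented-out capability probe (function name is illustrative; note that sysctlbyname's length argument should be the size of the output buffer, which the commented code hardcodes as 4):

    import Darwin
    
    func deviceLikelySupportsSoftwareAv1() -> Bool {
        var cpuCount: UInt32 = 0
        var length = MemoryLayout<UInt32>.size
        // "hw.ncpu" reports the logical core count; the 6-core threshold
        // follows the commented-out code above.
        sysctlbyname("hw.ncpu", &cpuCount, &length, nil, 0)
        return cpuCount >= 6
    }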
|
@ -1076,7 +1076,7 @@ final class HLSVideoJSNativeContentNode: ASDisplayNode, UniversalVideoContentNod
|
|||||||
var onSeeked: (() -> Void)?
|
var onSeeked: (() -> Void)?
|
||||||
self.player = ChunkMediaPlayerV2(
|
self.player = ChunkMediaPlayerV2(
|
||||||
audioSessionManager: audioSessionManager,
|
audioSessionManager: audioSessionManager,
|
||||||
partsState: self.chunkPlayerPartsState.get(),
|
source: .externalParts(self.chunkPlayerPartsState.get()),
|
||||||
video: true,
|
video: true,
|
||||||
enableSound: self.enableSound,
|
enableSound: self.enableSound,
|
||||||
baseRate: baseRate,
|
baseRate: baseRate,
|
||||||
@ -1085,25 +1085,14 @@ final class HLSVideoJSNativeContentNode: ASDisplayNode, UniversalVideoContentNod
|
|||||||
},
|
},
|
||||||
playerNode: self.playerNode
|
playerNode: self.playerNode
|
||||||
)
|
)
|
||||||
/*self.player = ChunkMediaPlayerImpl(
|
|
||||||
postbox: postbox,
|
|
||||||
audioSessionManager: audioSessionManager,
|
|
||||||
partsState: self.chunkPlayerPartsState.get(),
|
|
||||||
video: true,
|
|
||||||
enableSound: self.enableSound,
|
|
||||||
baseRate: baseRate,
|
|
||||||
onSeeked: {
|
|
||||||
onSeeked?()
|
|
||||||
},
|
|
||||||
playerNode: self.playerNode
|
|
||||||
)*/
|
|
||||||
|
|
||||||
super.init()
|
super.init()
|
||||||
|
|
||||||
self.contextDisposable = SharedHLSVideoJSContext.shared.register(context: self)
|
self.contextDisposable = SharedHLSVideoJSContext.shared.register(context: self)
|
||||||
|
|
||||||
self.playerNode.frame = CGRect(origin: CGPoint(), size: self.intrinsicDimensions)
|
self.playerNode.frame = CGRect(origin: CGPoint(), size: self.intrinsicDimensions)
|
||||||
var didProcessFramesToDisplay = false
|
|
||||||
|
/*var didProcessFramesToDisplay = false
|
||||||
self.playerNode.isHidden = true
|
self.playerNode.isHidden = true
|
||||||
self.playerNode.hasSentFramesToDisplay = { [weak self] in
|
self.playerNode.hasSentFramesToDisplay = { [weak self] in
|
||||||
guard let self, !didProcessFramesToDisplay else {
|
guard let self, !didProcessFramesToDisplay else {
|
||||||
@ -1111,7 +1100,7 @@ final class HLSVideoJSNativeContentNode: ASDisplayNode, UniversalVideoContentNod
|
|||||||
}
|
}
|
||||||
didProcessFramesToDisplay = true
|
didProcessFramesToDisplay = true
|
||||||
self.playerNode.isHidden = false
|
self.playerNode.isHidden = false
|
||||||
}
|
}*/
|
||||||
|
|
||||||
//let thumbnailVideoReference = HLSVideoContent.minimizedHLSQuality(file: fileReference)?.file ?? fileReference
|
//let thumbnailVideoReference = HLSVideoContent.minimizedHLSQuality(file: fileReference)?.file ?? fileReference
|
||||||
|
|
||||||
@ -1843,7 +1832,7 @@ private final class SourceBuffer {
|
|||||||
let item = ChunkMediaPlayerPart(
|
let item = ChunkMediaPlayerPart(
|
||||||
startTime: fragmentInfo.startTime.seconds,
|
startTime: fragmentInfo.startTime.seconds,
|
||||||
endTime: fragmentInfo.startTime.seconds + fragmentInfo.duration.seconds,
|
endTime: fragmentInfo.startTime.seconds + fragmentInfo.duration.seconds,
|
||||||
file: tempFile,
|
content: .tempFile(ChunkMediaPlayerPart.Content.TempFile(file: tempFile)),
|
||||||
codecName: videoCodecName
|
codecName: videoCodecName
|
||||||
)
|
)
|
||||||
self.items.append(item)
|
self.items.append(item)
|
||||||
|
@ -146,6 +146,137 @@ public final class NativeVideoContent: UniversalVideoContent {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
private enum PlayerImpl {
|
||||||
|
case legacy(MediaPlayer)
|
||||||
|
case chunked(ChunkMediaPlayerV2)
|
||||||
|
|
||||||
|
var actionAtEnd: MediaPlayerActionAtEnd {
|
||||||
|
get {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
return player.actionAtEnd
|
||||||
|
case let .chunked(player):
|
||||||
|
return player.actionAtEnd
|
||||||
|
}
|
||||||
|
} set(value) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.actionAtEnd = value
|
||||||
|
case let .chunked(player):
|
||||||
|
player.actionAtEnd = value
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
var status: Signal<MediaPlayerStatus, NoError> {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
return player.status
|
||||||
|
case let .chunked(player):
|
||||||
|
return player.status
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func play() {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.play()
|
||||||
|
case let .chunked(player):
|
||||||
|
player.play()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func pause() {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.pause()
|
||||||
|
case let .chunked(player):
|
||||||
|
player.pause()
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func togglePlayPause(faded: Bool = false) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.togglePlayPause(faded: faded)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.togglePlayPause(faded: faded)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func playOnceWithSound(playAndRecord: Bool, seek: MediaPlayerSeek = .start) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.playOnceWithSound(playAndRecord: playAndRecord, seek: seek)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.playOnceWithSound(playAndRecord: playAndRecord, seek: seek)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func continueWithOverridingAmbientMode(isAmbient: Bool) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.continueWithOverridingAmbientMode(isAmbient: isAmbient)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.continueWithOverridingAmbientMode(isAmbient: isAmbient)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func continuePlayingWithoutSound(seek: MediaPlayerSeek = .start) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.continuePlayingWithoutSound(seek: seek)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.continuePlayingWithoutSound(seek: seek)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func seek(timestamp: Double, play: Bool? = nil) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.seek(timestamp: timestamp, play: play)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.seek(timestamp: timestamp, play: play)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setForceAudioToSpeaker(_ value: Bool) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.setForceAudioToSpeaker(value)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.setForceAudioToSpeaker(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setSoundMuted(soundMuted: Bool) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.setSoundMuted(soundMuted: soundMuted)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.setSoundMuted(soundMuted: soundMuted)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setBaseRate(_ baseRate: Double) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.setBaseRate(baseRate)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.setBaseRate(baseRate)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func setContinuePlayingWithoutSoundOnLostAudioSession(_ value: Bool) {
|
||||||
|
switch self {
|
||||||
|
case let .legacy(player):
|
||||||
|
player.setContinuePlayingWithoutSoundOnLostAudioSession(value)
|
||||||
|
case let .chunked(player):
|
||||||
|
player.setContinuePlayingWithoutSoundOnLostAudioSession(value)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {
|
private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContentNode {
|
||||||
private let postbox: Postbox
|
private let postbox: Postbox
|
||||||
private let userLocation: MediaResourceUserLocation
|
private let userLocation: MediaResourceUserLocation
|
||||||
@ -165,7 +296,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent
|
|||||||
private let continuePlayingWithoutSoundOnLostAudioSession: Bool
|
private let continuePlayingWithoutSoundOnLostAudioSession: Bool
|
||||||
private let displayImage: Bool
|
private let displayImage: Bool
|
||||||
|
|
||||||
private var player: MediaPlayer
|
private var player: PlayerImpl
|
||||||
private var thumbnailPlayer: MediaPlayer?
|
private var thumbnailPlayer: MediaPlayer?
|
||||||
private let imageNode: TransformImageNode
|
private let imageNode: TransformImageNode
|
||||||
private let playerNode: MediaPlayerNode
|
private let playerNode: MediaPlayerNode
|
||||||
@@ -252,7 +383,57 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent
 
         let selectedFile = fileReference.media
 
-        self.player = MediaPlayer(audioSessionManager: audioSessionManager, postbox: postbox, userLocation: userLocation, userContentType: userContentType, resourceReference: fileReference.resourceReference(selectedFile.resource), tempFilePath: tempFilePath, limitedFileRange: limitedFileRange, streamable: streamVideo, video: true, preferSoftwareDecoding: false, playAutomatically: false, enableSound: enableSound, baseRate: baseRate, fetchAutomatically: fetchAutomatically, soundMuted: soundMuted, ambient: beginWithAmbientSound, mixWithOthers: mixWithOthers, continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession, storeAfterDownload: storeAfterDownload, isAudioVideoMessage: isAudioVideoMessage)
+        self.playerNode = MediaPlayerNode(backgroundThread: false, captureProtected: captureProtected)
+
+        if !"".isEmpty {
+            let mediaPlayer = MediaPlayer(
+                audioSessionManager: audioSessionManager,
+                postbox: postbox,
+                userLocation: userLocation,
+                userContentType: userContentType,
+                resourceReference: fileReference.resourceReference(selectedFile.resource),
+                tempFilePath: tempFilePath,
+                limitedFileRange: limitedFileRange,
+                streamable: streamVideo,
+                video: true,
+                preferSoftwareDecoding: false,
+                playAutomatically: false,
+                enableSound: enableSound,
+                baseRate: baseRate,
+                fetchAutomatically: fetchAutomatically,
+                soundMuted: soundMuted,
+                ambient: beginWithAmbientSound,
+                mixWithOthers: mixWithOthers,
+                continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession,
+                storeAfterDownload: storeAfterDownload,
+                isAudioVideoMessage: isAudioVideoMessage
+            )
+            self.player = .legacy(mediaPlayer)
+            mediaPlayer.attachPlayerNode(self.playerNode)
+        } else {
+            let mediaPlayer = ChunkMediaPlayerV2(
+                audioSessionManager: audioSessionManager,
+                source: .directFetch(ChunkMediaPlayerV2.SourceDescription.ResourceDescription(
+                    postbox: postbox,
+                    reference: fileReference.resourceReference(selectedFile.resource),
+                    userLocation: userLocation,
+                    userContentType: userContentType,
+                    statsCategory: statsCategoryForFileWithAttributes(fileReference.media.attributes),
+                    fetchAutomatically: fetchAutomatically
+                )),
+                video: true,
+                playAutomatically: false,
+                enableSound: enableSound,
+                baseRate: baseRate,
+                soundMuted: soundMuted,
+                ambient: beginWithAmbientSound,
+                mixWithOthers: mixWithOthers,
+                continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession,
+                isAudioVideoMessage: isAudioVideoMessage,
+                playerNode: self.playerNode
+            )
+            self.player = .chunked(mediaPlayer)
+        }
 
         var actionAtEndImpl: (() -> Void)?
         if enableSound && !loopVideo {
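Note the gate: `"".isEmpty` is always true, so `!"".isEmpty` is always false and the ChunkMediaPlayerV2 branch is the one that actually runs; the legacy MediaPlayer branch stays compiled so it can be restored by flipping the condition. A named constant would express the same kill switch more readably (hypothetical refactor, not part of this commit):

// Hypothetical refactor: name the always-false gate instead of !"".isEmpty.
let useLegacyPlayer = false // flip to true to fall back to MediaPlayer
if useLegacyPlayer {
    // ... legacy MediaPlayer path ...
} else {
    // ... ChunkMediaPlayerV2 path ...
}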
@@ -264,8 +445,6 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent
                 actionAtEndImpl?()
             })
         }
-        self.playerNode = MediaPlayerNode(backgroundThread: false, captureProtected: captureProtected)
-        self.player.attachPlayerNode(self.playerNode)
 
         self.dimensions = fileReference.media.dimensions?.cgSize
         if let dimensions = self.dimensions {
@@ -274,7 +453,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent
 
         super.init()
 
-        var didProcessFramesToDisplay = false
+        /*var didProcessFramesToDisplay = false
         self.playerNode.isHidden = true
         self.playerNode.hasSentFramesToDisplay = { [weak self] in
             guard let self, !didProcessFramesToDisplay else {
@@ -283,7 +462,7 @@ private final class NativeVideoContentNode: ASDisplayNode, UniversalVideoContent
             didProcessFramesToDisplay = true
             self.playerNode.isHidden = false
             self.hasSentFramesToDisplay?()
-        }
+        }*/
 
         if let dimensions = hintDimensions {
             self.dimensions = dimensions
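With this block commented out, playerNode is no longer hidden while waiting for the first decoded frame, and the hasSentFramesToDisplay callback is no longer forwarded from here; the /* ... */ markers keep the old gating logic in the source for easy restoration.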
@@ -29,6 +29,7 @@ UIView * _Nullable getPortalViewSourceView(UIView * _Nonnull portalView);
 
 NSObject * _Nullable makeBlurFilter();
 NSObject * _Nullable makeLuminanceToAlphaFilter();
+NSObject * _Nullable makeColorInvertFilter();
 NSObject * _Nullable makeMonochromeFilter();
 
 void setLayerDisableScreenshots(CALayer * _Nonnull layer, bool disableScreenshots);
@@ -234,6 +234,10 @@ NSObject * _Nullable makeLuminanceToAlphaFilter() {
     return [(id<GraphicsFilterProtocol>)NSClassFromString(@"CAFilter") filterWithName:@"luminanceToAlpha"];
 }
 
+NSObject * _Nullable makeColorInvertFilter() {
+    return [(id<GraphicsFilterProtocol>)NSClassFromString(@"CAFilter") filterWithName:@"colorInvert"];
+}
+
 NSObject * _Nullable makeMonochromeFilter() {
     return [(id<GraphicsFilterProtocol>)NSClassFromString(@"CAFilter") filterWithName:@"colorMonochrome"];
 }
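Like the other factories here, makeColorInvertFilter() instantiates the private CAFilter class by name, so the runtime lookup can fail and the function can return nil. A minimal usage sketch from Swift (the layer is illustrative, and the C function is assumed to be visible through the module's bridging header):

// Invert the rendered colors of a layer, e.g. for a forced-invert effect.
// CAFilter is private API resolved at runtime, so the result may be nil.
if let colorInvert = makeColorInvertFilter() {
    someLayer.filters = [colorInvert]
}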

third-party/dav1d/build-dav1d-bazel.sh (vendored)
@@ -12,7 +12,12 @@ CROSSFILE=""
 if [ "$ARCH" = "arm64" ]; then
     CROSSFILE="../package/crossfiles/arm64-iPhoneOS.meson"
 elif [ "$ARCH" = "sim_arm64" ]; then
-    CROSSFILE="../../arm64-iPhoneSimulator.meson"
+    rm -f "arm64-iPhoneSimulator-custom.meson"
+    TARGET_CROSSFILE="$BUILD_DIR/dav1d/package/crossfiles/arm64-iPhoneSimulator-custom.meson"
+    cp "$BUILD_DIR/arm64-iPhoneSimulator.meson" "$TARGET_CROSSFILE"
+    custom_xcode_path="$(xcode-select -p)/"
+    sed -i '' "s|/Applications/Xcode.app/Contents/Developer/|$custom_xcode_path|g" "$TARGET_CROSSFILE"
+    CROSSFILE="../package/crossfiles/arm64-iPhoneSimulator-custom.meson"
 else
     echo "Unsupported architecture $ARCH"
     exit 1
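In effect, the sim_arm64 branch now generates a machine-local crossfile: it copies the checked-in arm64-iPhoneSimulator.meson and rewrites the hard-coded /Applications/Xcode.app/Contents/Developer/ prefix to whatever `xcode-select -p` reports, so the dav1d simulator build no longer assumes Xcode is installed at the default path.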