diff --git a/build_number_offset b/build_number_offset
index 1a1e818a7b..d0f0d290cd 100644
--- a/build_number_offset
+++ b/build_number_offset
@@ -1 +1 @@
-2500
+2510
diff --git a/submodules/MediaPlayer/Sources/ChunkMediaPlayerV2.swift b/submodules/MediaPlayer/Sources/ChunkMediaPlayerV2.swift
index b0a25c5d3f..96debdd7ce 100644
--- a/submodules/MediaPlayer/Sources/ChunkMediaPlayerV2.swift
+++ b/submodules/MediaPlayer/Sources/ChunkMediaPlayerV2.swift
@@ -168,11 +168,14 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
     private let mediaDataReaderParams: MediaDataReaderParams
     private let audioSessionManager: ManagedAudioSession
     private let onSeeked: (() -> Void)?
+    private weak var playerNode: MediaPlayerNode?
 
     private let renderSynchronizer: AVSampleBufferRenderSynchronizer
     private var videoRenderer: AVSampleBufferDisplayLayer
     private var audioRenderer: AVSampleBufferAudioRenderer?
 
+    private var didNotifySentVideoFrames: Bool = false
+
     private var partsState = ChunkMediaPlayerPartsState(duration: nil, content: .parts([]))
     private var loadedParts: [LoadedPart] = []
     private var loadedPartsMediaData: QueueLocalObject<LoadedPartsMediaData>
@@ -245,6 +248,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
         self.mediaDataReaderParams = params
         self.audioSessionManager = audioSessionManager
         self.onSeeked = onSeeked
+        self.playerNode = playerNode
 
         self.loadedPartsMediaData = QueueLocalObject(queue: self.dataQueue, generate: {
             return LoadedPartsMediaData()
@@ -938,10 +942,11 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                 videoTarget = self.videoRenderer
             }
 
+            let didNotifySentVideoFrames = self.didNotifySentVideoFrames
             videoTarget.requestMediaDataWhenReady(on: self.dataQueue.queue, using: { [weak self] in
                 if let loadedPartsMediaData = loadedPartsMediaData.unsafeGet() {
-                    let bufferIsReadyForMoreData = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: videoTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: true)
-                    if bufferIsReadyForMoreData {
+                    let bufferFillResult = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: videoTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: true)
+                    if bufferFillResult.bufferIsReadyForMoreData {
                         videoTarget.stopRequestingMediaData()
                         Queue.mainQueue().async {
                             guard let self else {
@@ -951,6 +956,21 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                             self.updateInternalState()
                         }
                     }
+                    if !didNotifySentVideoFrames {
+                        Queue.mainQueue().async {
+                            guard let self else {
+                                return
+                            }
+                            if self.didNotifySentVideoFrames {
+                                return
+                            }
+                            self.didNotifySentVideoFrames = true
+                            if #available(iOS 17.4, *) {
+                            } else {
+                                self.playerNode?.hasSentFramesToDisplay?()
+                            }
+                        }
+                    }
                 }
             })
         }
@@ -961,8 +981,8 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             let audioTarget = audioRenderer
             audioTarget.requestMediaDataWhenReady(on: self.dataQueue.queue, using: { [weak self] in
                 if let loadedPartsMediaData = loadedPartsMediaData.unsafeGet() {
-                    let bufferIsReadyForMoreData = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: audioTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: false)
-                    if bufferIsReadyForMoreData {
+                    let bufferFillResult = ChunkMediaPlayerV2.fillRendererBuffer(bufferTarget: audioTarget, loadedPartsMediaData: loadedPartsMediaData, isVideo: false)
+                    if bufferFillResult.bufferIsReadyForMoreData {
                         audioTarget.stopRequestingMediaData()
                         Queue.mainQueue().async {
                             guard let self else {
@@ -977,8 +997,9 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
         }
     }
 
-    private static func fillRendererBuffer(bufferTarget: AVQueuedSampleBufferRendering, loadedPartsMediaData: LoadedPartsMediaData, isVideo: Bool) -> Bool {
+    private static func fillRendererBuffer(bufferTarget: AVQueuedSampleBufferRendering, loadedPartsMediaData: LoadedPartsMediaData, isVideo: Bool) -> (bufferIsReadyForMoreData: Bool, didEnqueue: Bool) {
         var bufferIsReadyForMoreData = true
+        var didEnqueue = false
         outer: while true {
             if !bufferTarget.isReadyForMoreMediaData {
                 bufferIsReadyForMoreData = false
@@ -1077,6 +1098,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
                     print("Enqueue audio \(CMSampleBufferGetPresentationTimeStamp(sampleBuffer).value) next: \(CMSampleBufferGetPresentationTimeStamp(sampleBuffer).value + 1024)")
                     }*/
                     bufferTarget.enqueue(sampleBuffer)
+                    didEnqueue = true
                     hasData = true
                     continue outer
                 case .waitingForMoreData:
@@ -1090,7 +1112,7 @@ public final class ChunkMediaPlayerV2: ChunkMediaPlayer {
             }
         }
 
-        return bufferIsReadyForMoreData
+        return (bufferIsReadyForMoreData: bufferIsReadyForMoreData, didEnqueue: didEnqueue)
     }
 }
 
diff --git a/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift b/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift
index 51a965da2f..747643119a 100644
--- a/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift
+++ b/submodules/TelegramCallsUI/Sources/PresentationGroupCall.swift
@@ -1871,7 +1871,7 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
                     }
                     strongSelf.onMutedSpeechActivityDetected?(value)
                 }
-            }, encryptionKey: encryptionKey, isConference: self.isConference, sharedAudioDevice: self.sharedAudioDevice))
+            }, encryptionKey: encryptionKey, isConference: self.isConference, isStream: self.isStream, sharedAudioDevice: self.sharedAudioDevice))
         }
 
         self.genericCallContext = genericCallContext
diff --git a/submodules/TelegramVoip/Sources/GroupCallContext.swift b/submodules/TelegramVoip/Sources/GroupCallContext.swift
index 796ff06f70..5acfdbef45 100644
--- a/submodules/TelegramVoip/Sources/GroupCallContext.swift
+++ b/submodules/TelegramVoip/Sources/GroupCallContext.swift
@@ -497,6 +497,7 @@ public final class OngoingGroupCallContext {
             onMutedSpeechActivityDetected: @escaping (Bool) -> Void,
             encryptionKey: Data?,
             isConference: Bool,
+            isStream: Bool,
             sharedAudioDevice: OngoingCallContext.AudioDevice?
         ) {
             self.queue = queue
@@ -506,12 +507,14 @@ public final class OngoingGroupCallContext {
             self.tempStatsLogFile = EngineTempBox.shared.tempFile(fileName: "CallStats.json")
             let tempStatsLogPath = self.tempStatsLogFile.path
 
-            #if os(iOS)
-
-            self.audioDevice = sharedAudioDevice
+#if os(iOS)
+            if sharedAudioDevice == nil && !isStream {
+                self.audioDevice = OngoingCallContext.AudioDevice.create(enableSystemMute: false)
+            } else {
+                self.audioDevice = sharedAudioDevice
+            }
             let audioDevice = self.audioDevice
-            #endif
-
+#endif
             var networkStateUpdatedImpl: ((GroupCallNetworkState) -> Void)?
             var audioLevelsUpdatedImpl: (([NSNumber]) -> Void)?
             var activityUpdatedImpl: (([UInt32]) -> Void)?
@@ -1178,10 +1181,10 @@ public final class OngoingGroupCallContext {
         }
     }
 
-    public init(inputDeviceId: String = "", outputDeviceId: String = "", audioSessionActive: Signal<Bool, NoError>, video: OngoingCallVideoCapturer?, requestMediaChannelDescriptions: @escaping (Set<UInt32>, @escaping ([MediaChannelDescription]) -> Void) -> Disposable, rejoinNeeded: @escaping () -> Void, outgoingAudioBitrateKbit: Int32?, videoContentType: VideoContentType, enableNoiseSuppression: Bool, disableAudioInput: Bool, enableSystemMute: Bool, preferX264: Bool, logPath: String, onMutedSpeechActivityDetected: @escaping (Bool) -> Void, encryptionKey: Data?, isConference: Bool, sharedAudioDevice: OngoingCallContext.AudioDevice?) {
+    public init(inputDeviceId: String = "", outputDeviceId: String = "", audioSessionActive: Signal<Bool, NoError>, video: OngoingCallVideoCapturer?, requestMediaChannelDescriptions: @escaping (Set<UInt32>, @escaping ([MediaChannelDescription]) -> Void) -> Disposable, rejoinNeeded: @escaping () -> Void, outgoingAudioBitrateKbit: Int32?, videoContentType: VideoContentType, enableNoiseSuppression: Bool, disableAudioInput: Bool, enableSystemMute: Bool, preferX264: Bool, logPath: String, onMutedSpeechActivityDetected: @escaping (Bool) -> Void, encryptionKey: Data?, isConference: Bool, isStream: Bool, sharedAudioDevice: OngoingCallContext.AudioDevice?) {
         let queue = self.queue
         self.impl = QueueLocalObject(queue: queue, generate: {
-            return Impl(queue: queue, inputDeviceId: inputDeviceId, outputDeviceId: outputDeviceId, audioSessionActive: audioSessionActive, video: video, requestMediaChannelDescriptions: requestMediaChannelDescriptions, rejoinNeeded: rejoinNeeded, outgoingAudioBitrateKbit: outgoingAudioBitrateKbit, videoContentType: videoContentType, enableNoiseSuppression: enableNoiseSuppression, disableAudioInput: disableAudioInput, enableSystemMute: enableSystemMute, preferX264: preferX264, logPath: logPath, onMutedSpeechActivityDetected: onMutedSpeechActivityDetected, encryptionKey: encryptionKey, isConference: isConference, sharedAudioDevice: sharedAudioDevice)
+            return Impl(queue: queue, inputDeviceId: inputDeviceId, outputDeviceId: outputDeviceId, audioSessionActive: audioSessionActive, video: video, requestMediaChannelDescriptions: requestMediaChannelDescriptions, rejoinNeeded: rejoinNeeded, outgoingAudioBitrateKbit: outgoingAudioBitrateKbit, videoContentType: videoContentType, enableNoiseSuppression: enableNoiseSuppression, disableAudioInput: disableAudioInput, enableSystemMute: enableSystemMute, preferX264: preferX264, logPath: logPath, onMutedSpeechActivityDetected: onMutedSpeechActivityDetected, encryptionKey: encryptionKey, isConference: isConference, isStream: isStream, sharedAudioDevice: sharedAudioDevice)
         })
     }
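
Note on the ChunkMediaPlayerV2 change: it wires a one-shot "first video frames sent" notification into the renderer feed loop, and fillRendererBuffer now also reports whether anything was enqueued. Below is a minimal, self-contained sketch of that notification pattern, not the patch itself: SketchPlayer, onFirstFramesSent, and nextSampleBuffer are hypothetical stand-ins for ChunkMediaPlayerV2, MediaPlayerNode.hasSentFramesToDisplay, and fillRendererBuffer(bufferTarget:loadedPartsMediaData:isVideo:), and it omits the patch's #available(iOS 17.4, *) branch, on which the callback is intentionally not invoked from this path.

import AVFoundation

// Hypothetical stand-in for ChunkMediaPlayerV2's video feed path.
final class SketchPlayer {
    private let dataQueue = DispatchQueue(label: "sketch.player.data")
    private let videoLayer = AVSampleBufferDisplayLayer()
    // Written on the main thread only; read once before hopping to the data queue.
    private var didNotifySentVideoFrames = false

    // Hypothetical stand-in for playerNode?.hasSentFramesToDisplay?().
    var onFirstFramesSent: (() -> Void)?

    // Call on the main thread; `nextSampleBuffer` stands in for fillRendererBuffer.
    func startFeeding(nextSampleBuffer: @escaping () -> CMSampleBuffer?) {
        let didNotify = self.didNotifySentVideoFrames
        videoLayer.requestMediaDataWhenReady(on: dataQueue) { [weak self] in
            guard let self else {
                return
            }
            // Fill the renderer while it keeps asking for data.
            var didEnqueue = false
            while self.videoLayer.isReadyForMoreMediaData {
                guard let sampleBuffer = nextSampleBuffer() else {
                    self.videoLayer.stopRequestingMediaData()
                    break
                }
                self.videoLayer.enqueue(sampleBuffer)
                didEnqueue = true
            }
            // Notify exactly once, on the main thread. The flag is re-checked there
            // because this ready-callback can fire again before the async hop runs.
            if didEnqueue && !didNotify {
                DispatchQueue.main.async {
                    if self.didNotifySentVideoFrames {
                        return
                    }
                    self.didNotifySentVideoFrames = true
                    self.onFirstFramesSent?()
                }
            }
        }
    }
}

Capturing the flag before the hop to the data queue, then re-checking it on the main queue, is what keeps the notification single-shot even though requestMediaDataWhenReady's block fires repeatedly.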