Temp
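Summary (inferred from the hunks below): this commit plumbs an audioLevelPipe: ValuePipe<Float> from MediaPlayer through MediaPlayerContext into MediaPlayerAudioRenderer. The audio render callback samples one 16-bit value per callback, normalizes it, and pushes it into the pipe, which MediaPlayer surfaces as a public audioLevelStream signal. PresentationCallToneRenderer and ManagedAudioRecorderContext are updated to pass pipes to the renderer's changed initializer.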
@@ -126,17 +126,19 @@ private final class MediaPlayerContext {
     private var lastStatusUpdateTimestamp: Double?
     private let playerStatus: Promise<MediaPlayerStatus>
     private let playerStatusValue = Atomic<MediaPlayerStatus?>(value: nil)
+    private let audioLevelPipe: ValuePipe<Float>
     
     fileprivate var actionAtEnd: MediaPlayerActionAtEnd = .stop
     
     private var stoppedAtEnd = false
     
-    init(queue: Queue, audioSessionManager: ManagedAudioSession, playerStatus: Promise<MediaPlayerStatus>, postbox: Postbox, resourceReference: MediaResourceReference, tempFilePath: String?, streamable: MediaPlayerStreaming, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool, enableSound: Bool, baseRate: Double, fetchAutomatically: Bool, playAndRecord: Bool, keepAudioSessionWhilePaused: Bool, continuePlayingWithoutSoundOnLostAudioSession: Bool) {
+    init(queue: Queue, audioSessionManager: ManagedAudioSession, playerStatus: Promise<MediaPlayerStatus>, audioLevelPipe: ValuePipe<Float>, postbox: Postbox, resourceReference: MediaResourceReference, tempFilePath: String?, streamable: MediaPlayerStreaming, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool, enableSound: Bool, baseRate: Double, fetchAutomatically: Bool, playAndRecord: Bool, keepAudioSessionWhilePaused: Bool, continuePlayingWithoutSoundOnLostAudioSession: Bool) {
         assert(queue.isCurrent())
         
         self.queue = queue
         self.audioSessionManager = audioSessionManager
         self.playerStatus = playerStatus
+        self.audioLevelPipe = audioLevelPipe
         self.postbox = postbox
         self.resourceReference = resourceReference
         self.tempFilePath = tempFilePath
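A note on the type choice: as far as SwiftSignalKit's ValuePipe goes, it multicasts pushed values without storing a current one, so level subscribers only observe values emitted after they attach; the Promise used for playerStatus, by contrast, replays its latest value.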
@@ -366,7 +368,7 @@ private final class MediaPlayerContext {
             self.audioRenderer = nil
             
             let queue = self.queue
-            renderer = MediaPlayerAudioRenderer(audioSession: .manager(self.audioSessionManager), playAndRecord: self.playAndRecord, forceAudioToSpeaker: self.forceAudioToSpeaker, baseRate: self.baseRate, updatedRate: { [weak self] in
+            renderer = MediaPlayerAudioRenderer(audioSession: .manager(self.audioSessionManager), playAndRecord: self.playAndRecord, forceAudioToSpeaker: self.forceAudioToSpeaker, baseRate: self.baseRate, audioLevelPipe: self.audioLevelPipe, updatedRate: { [weak self] in
                 queue.async {
                     if let strongSelf = self {
                         strongSelf.tick()
@@ -444,7 +446,7 @@ private final class MediaPlayerContext {
         self.lastStatusUpdateTimestamp = nil
         if self.enableSound {
             let queue = self.queue
-            let renderer = MediaPlayerAudioRenderer(audioSession: .manager(self.audioSessionManager), playAndRecord: self.playAndRecord, forceAudioToSpeaker: self.forceAudioToSpeaker, baseRate: self.baseRate, updatedRate: { [weak self] in
+            let renderer = MediaPlayerAudioRenderer(audioSession: .manager(self.audioSessionManager), playAndRecord: self.playAndRecord, forceAudioToSpeaker: self.forceAudioToSpeaker, baseRate: self.baseRate, audioLevelPipe: self.audioLevelPipe, updatedRate: { [weak self] in
                 queue.async {
                     if let strongSelf = self {
                         strongSelf.tick()
@@ -966,6 +968,11 @@ public final class MediaPlayer {
         return self.statusValue.get()
     }
     
+    private let audioLevelPipe = ValuePipe<Float>()
+    public var audioLevelStream: Signal<Float, NoError> {
+        return self.audioLevelPipe.signal()
+    }
+    
     public var actionAtEnd: MediaPlayerActionAtEnd = .stop {
         didSet {
             let value = self.actionAtEnd
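A minimal consumer sketch for the new stream (hypothetical call site, not part of this commit; `player` stands for any MediaPlayer instance, and `start(next:)` is the usual SwiftSignalKit subscription):

    // Hypothetical usage: observe the per-render-callback level values.
    let levelDisposable = player.audioLevelStream.start(next: { level in
        // One value per audio render cycle, normalized to [0.0, 1.0].
        updateLevelIndicator(level) // hypothetical UI hook
    })
    // Later, when the observer goes away:
    levelDisposable.dispose()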
@@ -978,8 +985,9 @@ public final class MediaPlayer {
     }
     
     public init(audioSessionManager: ManagedAudioSession, postbox: Postbox, resourceReference: MediaResourceReference, tempFilePath: String? = nil, streamable: MediaPlayerStreaming, video: Bool, preferSoftwareDecoding: Bool, playAutomatically: Bool = false, enableSound: Bool, baseRate: Double = 1.0, fetchAutomatically: Bool, playAndRecord: Bool = false, keepAudioSessionWhilePaused: Bool = false, continuePlayingWithoutSoundOnLostAudioSession: Bool = false) {
+        let audioLevelPipe = self.audioLevelPipe
         self.queue.async {
-            let context = MediaPlayerContext(queue: self.queue, audioSessionManager: audioSessionManager, playerStatus: self.statusValue, postbox: postbox, resourceReference: resourceReference, tempFilePath: tempFilePath, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding, playAutomatically: playAutomatically, enableSound: enableSound, baseRate: baseRate, fetchAutomatically: fetchAutomatically, playAndRecord: playAndRecord, keepAudioSessionWhilePaused: keepAudioSessionWhilePaused, continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession)
+            let context = MediaPlayerContext(queue: self.queue, audioSessionManager: audioSessionManager, playerStatus: self.statusValue, audioLevelPipe: audioLevelPipe, postbox: postbox, resourceReference: resourceReference, tempFilePath: tempFilePath, streamable: streamable, video: video, preferSoftwareDecoding: preferSoftwareDecoding, playAutomatically: playAutomatically, enableSound: enableSound, baseRate: baseRate, fetchAutomatically: fetchAutomatically, playAndRecord: playAndRecord, keepAudioSessionWhilePaused: keepAudioSessionWhilePaused, continuePlayingWithoutSoundOnLostAudioSession: continuePlayingWithoutSoundOnLostAudioSession)
             self.contextRef = Unmanaged.passRetained(context)
         }
     }
@@ -19,17 +19,19 @@ private final class AudioPlayerRendererBufferContext {
     var lowWaterSize: Int
     var notifyLowWater: () -> Void
     var updatedRate: () -> Void
+    var updatedLevel: (Float) -> Void
     var notifiedLowWater = false
     var overflowData = Data()
     var overflowDataMaxChannelSampleIndex: Int64 = 0
     var renderTimestampTick: Int64 = 0
     
-    init(timebase: CMTimebase, buffer: RingByteBuffer, lowWaterSize: Int, notifyLowWater: @escaping () -> Void, updatedRate: @escaping () -> Void) {
+    init(timebase: CMTimebase, buffer: RingByteBuffer, lowWaterSize: Int, notifyLowWater: @escaping () -> Void, updatedRate: @escaping () -> Void, updatedLevel: @escaping (Float) -> Void) {
         self.timebase = timebase
         self.buffer = buffer
         self.lowWaterSize = lowWaterSize
         self.notifyLowWater = notifyLowWater
         self.updatedRate = updatedRate
+        self.updatedLevel = updatedLevel
     }
 }
 
@@ -111,6 +113,7 @@ private func rendererInputProc(refCon: UnsafeMutableRawPointer, ioActionFlags: U
         }
         
         let rendererBuffer = context.buffer
+        var updatedLevel = false
         
         while rendererFillOffset.0 < bufferList.count {
             if let bufferData = bufferList[rendererFillOffset.0].mData {
@@ -125,6 +128,11 @@ private func rendererInputProc(refCon: UnsafeMutableRawPointer, ioActionFlags: U
                 let consumeCount = bufferDataSize - dataOffset
                 
                 let actualConsumedCount = rendererBuffer.dequeue(bufferData.advanced(by: dataOffset), count: consumeCount)
+                if !updatedLevel && actualConsumedCount > 0 {
+                    updatedLevel = true
+                    let value = bufferData.advanced(by: dataOffset).assumingMemoryBound(to: UInt16.self).pointee
+                    context.updatedLevel(Float(value) / Float(UInt16.max))
+                }
                 rendererFillOffset.1 += actualConsumedCount
                 
                 if actualConsumedCount == 0 {
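A note on the sampling above: only the first 16-bit word of each dequeued chunk is read, at most once per render callback, and it is normalized as an unsigned integer. Assuming the ring buffer carries signed 16-bit PCM, this yields a coarse activity indicator rather than a true amplitude, because the bit pattern is reinterpreted:

    // Sketch of the reinterpretation, assuming signed 16-bit PCM samples.
    let nearSilentNegative = UInt16(bitPattern: Int16(-1)) // 0xFFFF
    let nearSilentPositive = UInt16(bitPattern: Int16(1))  // 0x0001
    Float(nearSilentNegative) / Float(UInt16.max) // ~1.0
    Float(nearSilentPositive) / Float(UInt16.max) // ~0.000015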
@@ -188,6 +196,8 @@ private final class AudioPlayerRendererContext {
     var paused = true
     var baseRate: Double
     
+    let audioLevelPipe: ValuePipe<Float>
+    
     var audioGraph: AUGraph?
     var timePitchAudioUnit: AudioComponentInstance?
     var outputAudioUnit: AudioComponentInstance?
@@ -210,12 +220,13 @@ private final class AudioPlayerRendererContext {
         }
     }
     
-    init(controlTimebase: CMTimebase, audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, forceAudioToSpeaker: Bool, baseRate: Double, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
+    init(controlTimebase: CMTimebase, audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, forceAudioToSpeaker: Bool, baseRate: Double, audioLevelPipe: ValuePipe<Float>, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
         assert(audioPlayerRendererQueue.isCurrent())
         
         self.audioSession = audioSession
         self.forceAudioToSpeaker = forceAudioToSpeaker
         self.baseRate = baseRate
+        self.audioLevelPipe = audioLevelPipe
         
         self.controlTimebase = controlTimebase
         self.updatedRate = updatedRate
@@ -234,6 +245,8 @@ private final class AudioPlayerRendererContext {
             notifyLowWater()
         }, updatedRate: {
             updatedRate()
+        }, updatedLevel: { level in
+            audioLevelPipe.putNext(level)
         }))
         self.bufferContextId = registerPlayerRendererBufferContext(self.bufferContext)
         
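This closes the loop: the updatedLevel closure captures the renderer context's audioLevelPipe, so values sampled in rendererInputProc flow into the same pipe that MediaPlayer surfaces as audioLevelStream above.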
@@ -709,7 +722,7 @@ public final class MediaPlayerAudioRenderer {
     private let audioClock: CMClock
     public let audioTimebase: CMTimebase
     
-    public init(audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, forceAudioToSpeaker: Bool, baseRate: Double, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
+    public init(audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, forceAudioToSpeaker: Bool, baseRate: Double, audioLevelPipe: ValuePipe<Float>, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
         var audioClock: CMClock?
         CMAudioClockCreate(allocator: nil, clockOut: &audioClock)
         if audioClock == nil {
@@ -722,7 +735,7 @@ public final class MediaPlayerAudioRenderer {
         self.audioTimebase = audioTimebase!
         
         audioPlayerRendererQueue.async {
-            let context = AudioPlayerRendererContext(controlTimebase: audioTimebase!, audioSession: audioSession, playAndRecord: playAndRecord, forceAudioToSpeaker: forceAudioToSpeaker, baseRate: baseRate, updatedRate: updatedRate, audioPaused: audioPaused)
+            let context = AudioPlayerRendererContext(controlTimebase: audioTimebase!, audioSession: audioSession, playAndRecord: playAndRecord, forceAudioToSpeaker: forceAudioToSpeaker, baseRate: baseRate, audioLevelPipe: audioLevelPipe, updatedRate: updatedRate, audioPaused: audioPaused)
             self.contextRef = Unmanaged.passRetained(context)
         }
     }
@@ -22,6 +22,7 @@ private final class PresentationCallToneRenderer {
     private let toneRenderer: MediaPlayerAudioRenderer
     private var toneRendererAudioSession: MediaPlayerAudioSessionCustomControl?
     private var toneRendererAudioSessionActivated = false
+    private let audioLevelPipe = ValuePipe<Float>()
     
     init(tone: PresentationCallTone) {
         let queue = Queue.mainQueue()
@@ -33,7 +34,7 @@ private final class PresentationCallToneRenderer {
         
         self.toneRenderer = MediaPlayerAudioRenderer(audioSession: .custom({ control in
             return controlImpl?(control) ?? EmptyDisposable
-        }), playAndRecord: false, forceAudioToSpeaker: false, baseRate: 1.0, updatedRate: {}, audioPaused: {})
+        }), playAndRecord: false, forceAudioToSpeaker: false, baseRate: 1.0, audioLevelPipe: self.audioLevelPipe, updatedRate: {}, audioPaused: {})
         
         controlImpl = { [weak self] control in
             queue.async {
@@ -213,7 +213,7 @@ final class ManagedAudioRecorderContext {
             }
             return ActionDisposable {
             }
-        }), playAndRecord: true, forceAudioToSpeaker: false, baseRate: 1.0, updatedRate: {
+        }), playAndRecord: true, forceAudioToSpeaker: false, baseRate: 1.0, audioLevelPipe: ValuePipe<Float>(), updatedRate: {
         }, audioPaused: {})
         self.toneRenderer = toneRenderer
         
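The recorder has no consumer for renderer levels, so a fresh ValuePipe<Float>() is passed here only to satisfy the new parameter; nothing subscribes to its output.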