Temporary audio session timing debugging

mirror of https://github.com/Swiftgram/Telegram-iOS.git
commit e70db49157 (parent be87379a4e)
@@ -335,6 +335,8 @@ private final class AudioPlayerRendererContext {
         assert(audioPlayerRendererQueue.isCurrent())
 
         if self.audioGraph == nil {
+            let startTime = CFAbsoluteTimeGetCurrent()
+
             var maybeAudioGraph: AUGraph?
             guard NewAUGraph(&maybeAudioGraph) == noErr, let audioGraph = maybeAudioGraph else {
                 return
@@ -428,6 +430,8 @@ private final class AudioPlayerRendererContext {
                 return
             }
 
+            print("MediaPlayerAudioRenderer initialize audio unit: \((CFAbsoluteTimeGetCurrent() - startTime) * 1000.0) ms")
+
             self.audioGraph = audioGraph
             self.timePitchAudioUnit = timePitchAudioUnit
             self.outputAudioUnit = outputAudioUnit
@@ -497,10 +501,14 @@ private final class AudioPlayerRendererContext {
         assert(audioPlayerRendererQueue.isCurrent())
 
         if let audioGraph = self.audioGraph {
+            let startTime = CFAbsoluteTimeGetCurrent()
+
             guard AUGraphStart(audioGraph) == noErr else {
                 self.closeAudioUnit()
                 return
             }
+
+            print("MediaPlayerAudioRenderer start audio unit: \((CFAbsoluteTimeGetCurrent() - startTime) * 1000.0) ms")
         }
     }
 
@@ -749,12 +749,20 @@ public final class ManagedAudioSession {
     private func activate() {
         if let (type, outputMode) = self.currentTypeAndOutputMode {
             do {
+                let startTime = CFAbsoluteTimeGetCurrent()
+
                 try AVAudioSession.sharedInstance().setActive(true, options: [.notifyOthersOnDeactivation])
 
+                print("AudioSession activate: \((CFAbsoluteTimeGetCurrent() - startTime) * 1000.0) ms")
+
                 self.updateCurrentAudioRouteInfo()
 
+                print("AudioSession updateCurrentAudioRouteInfo: \((CFAbsoluteTimeGetCurrent() - startTime) * 1000.0) ms")
+
                 try self.setupOutputMode(outputMode, type: type)
 
+                print("AudioSession setupOutputMode: \((CFAbsoluteTimeGetCurrent() - startTime) * 1000.0) ms")
+
                 if case .voiceCall = type {
                     try AVAudioSession.sharedInstance().setPreferredIOBufferDuration(0.005)
                 }
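The commit applies one instrumentation pattern at four call sites: capture CFAbsoluteTimeGetCurrent() before a potentially slow operation, then print the elapsed wall-clock time in milliseconds. A minimal sketch of that pattern factored into a reusable helper — the measure name and generic signature are illustrative and not part of this commit:

// Illustrative sketch only (not from the commit): the inline timing pattern
// above, factored into a reusable helper. Assumes Foundation for
// CFAbsoluteTimeGetCurrent().
import Foundation

@discardableResult
func measure<T>(_ label: String, _ work: () throws -> T) rethrows -> T {
    let startTime = CFAbsoluteTimeGetCurrent()
    defer {
        // Same format as the commit's prints: elapsed wall-clock time in ms.
        print("\(label): \((CFAbsoluteTimeGetCurrent() - startTime) * 1000.0) ms")
    }
    return try work()
}

// Hypothetical usage mirroring the activate() hunk:
// try measure("AudioSession activate") {
//     try AVAudioSession.sharedInstance().setActive(true, options: [.notifyOthersOnDeactivation])
// }

Because these are wall-clock measurements, they include any time the calling thread spends blocked inside the audio session or AUGraph calls, which is exactly the latency being debugged here.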