Merge branch 'master' of gitlab.com:peter-iakovlev/telegram-ios

Ilya Laktyushin 2020-12-16 08:59:19 +04:00
commit be4f069f1b
11 changed files with 37 additions and 13 deletions

View File

@@ -1122,7 +1122,7 @@ ios_extension(
":VersionInfoPlist",
":AppNameInfoPlist",
],
minimum_os_version = "9.0",
minimum_os_version = "10.0",
provisioning_profile = "//build-input/data/provisioning-profiles:NotificationContent.mobileprovision",
deps = [":NotificationContentExtensionLib"],
frameworks = [
@@ -1263,7 +1263,7 @@ ios_extension(
":VersionInfoPlist",
":AppNameInfoPlist",
],
minimum_os_version = "9.0",
minimum_os_version = "10.0",
provisioning_profile = "//build-input/data/provisioning-profiles:Intents.mobileprovision",
deps = [":IntentsExtensionLib"],
frameworks = [
@@ -1519,7 +1519,6 @@ ios_application(
":MtProtoKitFramework",
":SwiftSignalKitFramework",
":PostboxFramework",
#":TelegramApiFramework",
":SyncCoreFramework",
":TelegramCoreFramework",
":AsyncDisplayKitFramework",

View File

@@ -218,6 +218,7 @@ private final class AudioPlayerRendererContext {
let lowWaterSizeInSeconds: Int = 2
let audioSession: MediaPlayerAudioSessionControl
+ let useVoiceProcessingMode: Bool
let controlTimebase: CMTimebase
let updatedRate: () -> Void
let audioPaused: () -> Void
@@ -250,7 +251,7 @@ private final class AudioPlayerRendererContext {
}
}
- init(controlTimebase: CMTimebase, audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, ambient: Bool, forceAudioToSpeaker: Bool, baseRate: Double, audioLevelPipe: ValuePipe<Float>, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
+ init(controlTimebase: CMTimebase, audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, useVoiceProcessingMode: Bool, ambient: Bool, forceAudioToSpeaker: Bool, baseRate: Double, audioLevelPipe: ValuePipe<Float>, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
assert(audioPlayerRendererQueue.isCurrent())
self.audioSession = audioSession
@@ -263,6 +264,7 @@ private final class AudioPlayerRendererContext {
self.audioPaused = audioPaused
self.playAndRecord = playAndRecord
+ self.useVoiceProcessingMode = useVoiceProcessingMode
self.ambient = ambient
self.audioStreamDescription = audioRendererNativeStreamDescription()
@@ -407,7 +409,11 @@ private final class AudioPlayerRendererContext {
var outputNode: AUNode = 0
var outputDesc = AudioComponentDescription()
outputDesc.componentType = kAudioUnitType_Output
- outputDesc.componentSubType = kAudioUnitSubType_RemoteIO
+ if self.useVoiceProcessingMode {
+     outputDesc.componentSubType = kAudioUnitSubType_VoiceProcessingIO
+ } else {
+     outputDesc.componentSubType = kAudioUnitSubType_RemoteIO
+ }
outputDesc.componentFlags = 0
outputDesc.componentFlagsMask = 0
outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple
@@ -753,7 +759,7 @@ public final class MediaPlayerAudioRenderer {
private let audioClock: CMClock
public let audioTimebase: CMTimebase
- public init(audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, ambient: Bool, forceAudioToSpeaker: Bool, baseRate: Double, audioLevelPipe: ValuePipe<Float>, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
+ public init(audioSession: MediaPlayerAudioSessionControl, playAndRecord: Bool, useVoiceProcessingMode: Bool = false, ambient: Bool, forceAudioToSpeaker: Bool, baseRate: Double, audioLevelPipe: ValuePipe<Float>, updatedRate: @escaping () -> Void, audioPaused: @escaping () -> Void) {
var audioClock: CMClock?
CMAudioClockCreate(allocator: nil, clockOut: &audioClock)
if audioClock == nil {
@@ -766,7 +772,7 @@ public final class MediaPlayerAudioRenderer {
self.audioTimebase = audioTimebase!
audioPlayerRendererQueue.async {
- let context = AudioPlayerRendererContext(controlTimebase: audioTimebase!, audioSession: audioSession, playAndRecord: playAndRecord, ambient: ambient, forceAudioToSpeaker: forceAudioToSpeaker, baseRate: baseRate, audioLevelPipe: audioLevelPipe, updatedRate: updatedRate, audioPaused: audioPaused)
+ let context = AudioPlayerRendererContext(controlTimebase: audioTimebase!, audioSession: audioSession, playAndRecord: playAndRecord, useVoiceProcessingMode: useVoiceProcessingMode, ambient: ambient, forceAudioToSpeaker: forceAudioToSpeaker, baseRate: baseRate, audioLevelPipe: audioLevelPipe, updatedRate: updatedRate, audioPaused: audioPaused)
self.contextRef = Unmanaged.passRetained(context)
}
}
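Note: the new useVoiceProcessingMode flag only changes which output audio unit the renderer context builds; the rest of the audio graph setup is untouched. A condensed sketch of the selection logic after this change (names as in the diff above, surrounding setup omitted):

import AudioToolbox

// Set by the caller; the public init defaults it to false, so existing call sites keep the old behaviour.
let useVoiceProcessingMode = true

// VoiceProcessingIO is the system echo-cancelling I/O unit; RemoteIO is the plain output unit.
var outputDesc = AudioComponentDescription()
outputDesc.componentType = kAudioUnitType_Output
outputDesc.componentSubType = useVoiceProcessingMode
    ? kAudioUnitSubType_VoiceProcessingIO
    : kAudioUnitSubType_RemoteIO
outputDesc.componentFlags = 0
outputDesc.componentFlagsMask = 0
outputDesc.componentManufacturer = kAudioUnitManufacturer_Apple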

View File

@@ -505,9 +505,9 @@ private func dataAndStorageControllerEntries(state: DataAndStorageControllerStat
if #available(iOSApplicationExtension 13.2, iOS 13.2, *) {
entries.append(.shareSheet(presentationData.theme, presentationData.strings.ChatSettings_IntentsSettings))
}
- if #available(iOSApplicationExtension 14.0, iOS 14.0, *) {
+ /*if #available(iOSApplicationExtension 14.0, iOS 14.0, *) {
entries.append(.widgetSettings(presentationData.strings.ChatSettings_WidgetSettings))
- }
+ }*/
entries.append(.saveIncomingPhotos(presentationData.theme, presentationData.strings.Settings_SaveIncomingPhotos))
entries.append(.saveEditedPhotos(presentationData.theme, presentationData.strings.Settings_SaveEditedPhotos, data.generatedMediaStoreSettings.storeEditedPhotos))
entries.append(.openLinksIn(presentationData.theme, presentationData.strings.ChatSettings_OpenLinksIn, defaultWebBrowser))

View File

@@ -24,6 +24,8 @@ objc_library(
"MobileCoreServices",
"AddressBook",
"AVFoundation",
],
+ weak_sdk_frameworks = [
+     "PassKit",
+ ],
visibility = [

View File

@@ -19,6 +19,9 @@ objc_library(
sdk_frameworks = [
"Foundation",
"UIKit",
"AddressBook",
],
+ weak_sdk_frameworks = [
+     "PassKit",
+ ],
visibility = [
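Note: listing PassKit under weak_sdk_frameworks makes the linker bind its symbols weakly, so these libraries still load in environments where the framework is unavailable; callers are then expected to check for the classes at runtime before using them. A generic Swift illustration of that guard (not taken from these targets):

import Foundation

// With a weakly linked framework, an unavailable class resolves to nil at runtime.
if NSClassFromString("PKPaymentAuthorizationViewController") != nil {
    // PassKit is present: payment UI can be offered.
} else {
    // PassKit is missing: hide or disable payment entry points.
}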

View File

@@ -34,7 +34,7 @@ final class PresentationCallToneRenderer {
self.toneRenderer = MediaPlayerAudioRenderer(audioSession: .custom({ control in
return controlImpl?(control) ?? EmptyDisposable
- }), playAndRecord: false, ambient: false, forceAudioToSpeaker: false, baseRate: 1.0, audioLevelPipe: self.audioLevelPipe, updatedRate: {}, audioPaused: {})
+ }), playAndRecord: false, useVoiceProcessingMode: true, ambient: false, forceAudioToSpeaker: false, baseRate: 1.0, audioLevelPipe: self.audioLevelPipe, updatedRate: {}, audioPaused: {})
controlImpl = { [weak self] control in
queue.async {

View File

@@ -1,7 +1,7 @@
import Foundation
import AVFoundation
- private func loadToneData(name: String) -> Data? {
+ private func loadToneData(name: String, addSilenceDuration: Double = 0.0) -> Data? {
let outputSettings: [String: Any] = [
AVFormatIDKey: kAudioFormatLinearPCM as NSNumber,
AVSampleRateKey: 44100.0 as NSNumber,
@@ -62,6 +62,15 @@ private func loadToneData(name: String) -> Data? {
}
}
+ if !addSilenceDuration.isZero {
+     let sampleRate = 44100
+     let numberOfSamples = Int(Double(sampleRate) * addSilenceDuration)
+     let numberOfChannels = 2
+     let numberOfBytes = numberOfSamples * 2 * numberOfChannels
+     data.append(Data(count: numberOfBytes))
+ }
return data
}
@@ -110,6 +119,6 @@ func presentationCallToneData(_ tone: PresentationCallTone) -> Data? {
case .groupLeft:
return loadToneData(name: "voip_group_left.mp3")
case .groupConnecting:
return loadToneData(name: "voip_group_connecting.mp3")
return loadToneData(name: "voip_group_connecting.mp3", addSilenceDuration: 2.0)
}
}
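Note on the new silence padding: the byte count follows from the PCM output settings at the top of loadToneData (44100 Hz sample rate, 2 bytes per sample as implied by the multiplier, 2 channels). A quick check of the math for the 2.0 second value passed for the .groupConnecting tone, presumably so the looping connecting tone repeats with a pause rather than back to back:

let sampleRate = 44100                    // matches AVSampleRateKey above
let addSilenceDuration = 2.0              // value used for .groupConnecting
let numberOfSamples = Int(Double(sampleRate) * addSilenceDuration)    // 88200
let bytesPerSample = 2                    // 16-bit linear PCM
let numberOfChannels = 2
let silenceBytes = numberOfSamples * bytesPerSample * numberOfChannels
// 88200 * 2 * 2 = 352800 zero bytes appended as trailing silence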

View File

@@ -444,6 +444,7 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
private var removedChannelMembersDisposable: Disposable?
+ private var didStartConnectingOnce: Bool = false
private var didConnectOnce: Bool = false
private var toneRenderer: PresentationCallToneRenderer?
@@ -847,7 +848,7 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
}
}
- if wasConnecting != isConnecting && strongSelf.didConnectOnce {
+ if (wasConnecting != isConnecting && strongSelf.didConnectOnce) { //|| !strongSelf.didStartConnectingOnce {
if isConnecting {
let toneRenderer = PresentationCallToneRenderer(tone: .groupConnecting)
strongSelf.toneRenderer = toneRenderer
@@ -857,6 +858,10 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
}
}
+ if isConnecting {
+     strongSelf.didStartConnectingOnce = true
+ }
if case .connected = state {
if !strongSelf.didConnectOnce {
strongSelf.didConnectOnce = true