mirror of https://github.com/Swiftgram/Telegram-iOS.git
synced 2025-06-16 05:55:20 +00:00

[WIP] Call tones

This commit is contained in:
parent 2345a1b8d2
commit 430dd4defd
@@ -110,6 +110,9 @@ public final class GroupCallNavigationAccessoryPanel: ASDisplayNode {
     private let joinButtonTitleNode: ImmediateTextNode
     private let joinButtonBackgroundNode: ASImageNode
 
+    private var previewImageNode: ASImageNode?
+    private var previewImage: UIImage?
+
     private var audioLevelView: VoiceBlobView?
 
     private let micButton: HighlightTrackingButtonNode
@@ -536,8 +539,18 @@ public final class GroupCallNavigationAccessoryPanel: ASDisplayNode {
                 guard let self, let data else {
                     return
                 }
-                let _ = self
-                let _ = data
+
+                var image: UIImage?
+                for i in 0 ..< 100 {
+                    image = UIImage(data: data.subdata(in: i ..< data.count))
+                    if image != nil {
+                        break
+                    }
+                }
+                self.previewImage = image
+                if let (size, leftInset, rightInset) = self.validLayout {
+                    self.updateLayout(size: size, leftInset: leftInset, rightInset: rightInset, transition: .animated(duration: 0.2, curve: .easeInOut))
+                }
             })
         }
     }
@@ -668,6 +681,26 @@ public final class GroupCallNavigationAccessoryPanel: ASDisplayNode {
         staticTransition.updateFrame(node: self.joinButtonBackgroundNode, frame: CGRect(origin: CGPoint(), size: joinButtonFrame.size))
         staticTransition.updateFrame(node: self.joinButtonTitleNode, frame: CGRect(origin: CGPoint(x: floorToScreenPixels((joinButtonFrame.width - joinButtonTitleSize.width) / 2.0), y: floorToScreenPixels((joinButtonFrame.height - joinButtonTitleSize.height) / 2.0)), size: joinButtonTitleSize))
 
+        if let previewImage = self.previewImage {
+            let previewImageNode: ASImageNode
+            if let current = self.previewImageNode {
+                previewImageNode = current
+            } else {
+                previewImageNode = ASImageNode()
+                previewImageNode.clipsToBounds = true
+                previewImageNode.cornerRadius = 8.0
+                previewImageNode.contentMode = .scaleAspectFill
+                self.previewImageNode = previewImageNode
+                self.addSubnode(previewImageNode)
+            }
+            previewImageNode.image = previewImage
+            let previewSize = CGSize(width: 40.0, height: 40.0)
+            previewImageNode.frame = CGRect(origin: CGPoint(x: joinButtonFrame.minX - previewSize.width - 8.0, y: joinButtonFrame.minY + floor((joinButtonFrame.height - previewSize.height) / 2.0)), size: previewSize)
+        } else if let previewImageNode = self.previewImageNode {
+            self.previewImageNode = nil
+            previewImageNode.removeFromSupernode()
+        }
+
         let micButtonSize = CGSize(width: 36.0, height: 36.0)
         let micButtonFrame = CGRect(origin: CGPoint(x: size.width - rightInset - 7.0 - micButtonSize.width, y: floor((panelHeight - micButtonSize.height) / 2.0)), size: micButtonSize)
         staticTransition.updateFrame(node: self.micButton, frame: micButtonFrame)
@@ -4,12 +4,12 @@ import AVFoundation
 private func loadToneData(name: String, addSilenceDuration: Double = 0.0) -> Data? {
     let outputSettings: [String: Any] = [
         AVFormatIDKey: kAudioFormatLinearPCM as NSNumber,
-        AVSampleRateKey: 44100.0 as NSNumber,
+        AVSampleRateKey: 48000.0 as NSNumber,
         AVLinearPCMBitDepthKey: 16 as NSNumber,
         AVLinearPCMIsNonInterleaved: false as NSNumber,
         AVLinearPCMIsFloatKey: false as NSNumber,
         AVLinearPCMIsBigEndianKey: false as NSNumber,
-        AVNumberOfChannelsKey: 2 as NSNumber
+        AVNumberOfChannelsKey: 1 as NSNumber
     ]
 
     let nsName: NSString = name as NSString
@@ -63,9 +63,9 @@ private func loadToneData(name: String, addSilenceDuration: Double = 0.0) -> Data? {
     }
 
     if !addSilenceDuration.isZero {
-        let sampleRate = 44100
+        let sampleRate = 48000
         let numberOfSamples = Int(Double(sampleRate) * addSilenceDuration)
-        let numberOfChannels = 2
+        let numberOfChannels = 1
        let numberOfBytes = numberOfSamples * 2 * numberOfChannels
 
         data.append(Data(count: numberOfBytes))
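
Note on the two hunks above: with 16-bit linear PCM each sample is 2 bytes, so the silence tail appended here is sampleRate × addSilenceDuration samples × 2 bytes × channels; moving from 44.1 kHz stereo to 48 kHz mono roughly halves the padding bytes per second of silence. A minimal Swift sketch of the same arithmetic (the function name is illustrative, not part of the commit):

    import Foundation

    // Byte count of a silence tail for 16-bit linear PCM, matching the
    // constants above: 48 kHz, mono, 2 bytes per sample.
    func silencePaddingByteCount(duration: Double, sampleRate: Int = 48000, channels: Int = 1) -> Int {
        let numberOfSamples = Int(Double(sampleRate) * duration)
        return numberOfSamples * 2 * channels
    }

    // Example: 0.5 s of silence -> 24000 samples -> 48000 bytes of zeroed PCM.
    let tail = Data(count: silencePaddingByteCount(duration: 0.5))
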
@@ -2484,11 +2484,19 @@ public final class PresentationGroupCallImpl: PresentationGroupCall {
     }
 
     private func beginTone(tone: PresentationCallTone) {
+        if self.isStream {
+            switch tone {
+            case .groupJoined, .groupLeft:
+                return
+            default:
+                break
+            }
+        }
         if let toneData = presentationCallToneData(tone) {
             self.genericCallContext?.setTone(tone: OngoingGroupCallContext.Tone(
                 samples: toneData,
                 sampleRate: 44100,
-                loopCount: 1000
+                loopCount: tone.loopCount ?? 100000
             ))
         }
 
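
The "?? 100000" fallback suggests tone.loopCount is an optional per-tone repeat count introduced alongside this commit, with nil standing for "repeat until the tone is cancelled". A hedged sketch of the shape such a property could take (the concrete return values are assumptions, not from the diff):

    extension PresentationCallTone {
        // Assumed shape: a finite count for one-shot tones, nil for tones
        // that should keep looping until setTone(nil) stops them.
        var loopCount: Int? {
            switch self {
            case .groupJoined, .groupLeft:
                return 1
            default:
                return nil
            }
        }
    }
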
@@ -415,6 +415,7 @@ public final class OngoingGroupCallContext {
     private final class Impl {
         let queue: Queue
         let context: GroupCallThreadLocalContext
+        let audioDevice: SharedCallAudioDevice?
 
         let sessionId = UInt32.random(in: 0 ..< UInt32(Int32.max))
 
@@ -433,6 +434,12 @@ public final class OngoingGroupCallContext {
         init(queue: Queue, inputDeviceId: String, outputDeviceId: String, audioSessionActive: Signal<Bool, NoError>, video: OngoingCallVideoCapturer?, requestMediaChannelDescriptions: @escaping (Set<UInt32>, @escaping ([MediaChannelDescription]) -> Void) -> Disposable, rejoinNeeded: @escaping () -> Void, outgoingAudioBitrateKbit: Int32?, videoContentType: VideoContentType, enableNoiseSuppression: Bool, disableAudioInput: Bool, preferX264: Bool, logPath: String) {
             self.queue = queue
 
+            #if DEBUG
+            self.audioDevice = SharedCallAudioDevice(disableRecording: disableAudioInput)
+            #else
+            self.audioDevice = nil
+            #endif
+
             var networkStateUpdatedImpl: ((GroupCallNetworkState) -> Void)?
             var audioLevelsUpdatedImpl: (([NSNumber]) -> Void)?
 
@@ -538,7 +545,8 @@ public final class OngoingGroupCallContext {
                 enableNoiseSuppression: enableNoiseSuppression,
                 disableAudioInput: disableAudioInput,
                 preferX264: preferX264,
-                logPath: logPath
+                logPath: logPath,
+                audioDevice: self.audioDevice
             )
 
             let queue = self.queue
@@ -592,6 +600,7 @@ public final class OngoingGroupCallContext {
                     return
                 }
                 #if os(iOS)
+                self.audioDevice?.setManualAudioSessionIsActive(isActive)
                 self.context.setManualAudioSessionIsActive(isActive)
                 #endif
             }))
@@ -898,7 +907,7 @@ public final class OngoingGroupCallContext {
         }
 
         func setTone(tone: Tone?) {
-            self.context.setTone(tone.flatMap { tone in
+            self.audioDevice?.setTone(tone.flatMap { tone in
                 CallAudioTone(samples: tone.samples, sampleRate: tone.sampleRate, loopCount: tone.loopCount)
             })
         }
 
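
Worth noting for the hunk above: setTone now routes through the shared audio device instead of the call context, and audioDevice is only non-nil in DEBUG builds per the earlier hunk, so in release builds this call silently becomes a no-op. A defensive sketch that keeps the old context path as a fallback (a sketch, not the committed code):

    func setTone(tone: Tone?) {
        let mappedTone = tone.flatMap { tone in
            CallAudioTone(samples: tone.samples, sampleRate: tone.sampleRate, loopCount: tone.loopCount)
        }
        if let audioDevice = self.audioDevice {
            // New path: the shared device mixes the tone into playout.
            audioDevice.setTone(mappedTone)
        } else {
            // Previous behavior, kept as a fallback when no shared device exists.
            self.context.setTone(mappedTone)
        }
    }
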
@@ -722,7 +722,7 @@ public final class OngoingCallContext {
     let impl: SharedCallAudioDevice
 
     public static func create() -> AudioDevice? {
-        return AudioDevice(impl: SharedCallAudioDevice())
+        return AudioDevice(impl: SharedCallAudioDevice(disableRecording: false))
     }
 
     private init(impl: SharedCallAudioDevice) {
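
For the one-to-one call path above, create() preserves the old behavior by passing disableRecording: false to the new designated initializer; a playback-only device, as the group-call DEBUG path uses for tones, would pass true instead. A hypothetical call-site sketch, assuming AudioDevice is the nested factory shown in this hunk:

    // Assumed call site: obtain the shared audio device before starting a call.
    // create() is declared optional-returning, so handle nil defensively.
    if let audioDevice = OngoingCallContext.AudioDevice.create() {
        // Hand audioDevice to the call context so tones and playout share one ADM.
    }
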
@@ -23,7 +23,7 @@
 
 @interface SharedCallAudioDevice : NSObject
 
-- (instancetype _Nonnull)init;
+- (instancetype _Nonnull)initWithDisableRecording:(bool)disableRecording;
 
 + (void)setupAudioSession;
 
@@ -397,7 +397,8 @@ typedef NS_ENUM(int32_t, OngoingGroupCallRequestedVideoQuality) {
     enableNoiseSuppression:(bool)enableNoiseSuppression
     disableAudioInput:(bool)disableAudioInput
     preferX264:(bool)preferX264
-    logPath:(NSString * _Nonnull)logPath;
+    logPath:(NSString * _Nonnull)logPath
+    audioDevice:(SharedCallAudioDevice * _Nullable)audioDevice;
 
 - (void)stop;
 
@@ -73,35 +73,31 @@ public:
 
 public:
     virtual rtc::scoped_refptr<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS> audioDeviceModule() = 0;
+    virtual void start() = 0;
 };
 
 }
 
 class SharedAudioDeviceModuleImpl: public tgcalls::SharedAudioDeviceModule {
 public:
-    SharedAudioDeviceModuleImpl() {
-        if (tgcalls::StaticThreads::getThreads()->getWorkerThread()->IsCurrent()) {
-            _audioDeviceModule = rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, false, 1);
-            _audioDeviceModule->Init();
-            if (!_audioDeviceModule->Playing()) {
-                _audioDeviceModule->InitPlayout();
-            }
-        } else {
-            tgcalls::StaticThreads::getThreads()->getWorkerThread()->BlockingCall([&]() {
-                _audioDeviceModule = rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, false, 1);
-                _audioDeviceModule->Init();
-                if (!_audioDeviceModule->Playing()) {
-                    _audioDeviceModule->InitPlayout();
-                }
-            });
-        }
+    SharedAudioDeviceModuleImpl(bool disableAudioInput) {
+        RTC_DCHECK(tgcalls::StaticThreads::getThreads()->getWorkerThread()->IsCurrent());
+        _audioDeviceModule = rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, disableAudioInput, disableAudioInput ? 2 : 1);
     }
 
     virtual ~SharedAudioDeviceModuleImpl() override {
         if (tgcalls::StaticThreads::getThreads()->getWorkerThread()->IsCurrent()) {
             if (_audioDeviceModule->Playing()) {
                 _audioDeviceModule->StopPlayout();
+                _audioDeviceModule->StopRecording();
             }
             _audioDeviceModule = nullptr;
         } else {
             tgcalls::StaticThreads::getThreads()->getWorkerThread()->BlockingCall([&]() {
                 if (_audioDeviceModule->Playing()) {
                     _audioDeviceModule->StopPlayout();
+                    _audioDeviceModule->StopRecording();
                 }
                 _audioDeviceModule = nullptr;
             });
         }
@@ -112,6 +108,18 @@ public:
         return _audioDeviceModule;
     }
 
+    virtual void start() override {
+        RTC_DCHECK(tgcalls::StaticThreads::getThreads()->getWorkerThread()->IsCurrent());
+
+        _audioDeviceModule->Init();
+        if (!_audioDeviceModule->Playing()) {
+            _audioDeviceModule->InitPlayout();
+            //_audioDeviceModule->InitRecording();
+            _audioDeviceModule->InternalStartPlayout();
+            //_audioDeviceModule->InternalStartRecording();
+        }
+    }
+
 private:
     rtc::scoped_refptr<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS> _audioDeviceModule;
 };
@@ -120,11 +128,11 @@ private:
     std::shared_ptr<tgcalls::ThreadLocalObject<tgcalls::SharedAudioDeviceModule>> _audioDeviceModule;
 }
 
-- (instancetype _Nonnull)init {
+- (instancetype _Nonnull)initWithDisableRecording:(bool)disableRecording {
     self = [super init];
     if (self != nil) {
-        _audioDeviceModule.reset(new tgcalls::ThreadLocalObject<tgcalls::SharedAudioDeviceModule>(tgcalls::StaticThreads::getThreads()->getWorkerThread(), []() mutable {
-            return (tgcalls::SharedAudioDeviceModule *)(new SharedAudioDeviceModuleImpl());
+        _audioDeviceModule.reset(new tgcalls::ThreadLocalObject<tgcalls::SharedAudioDeviceModule>(tgcalls::StaticThreads::getThreads()->getWorkerThread(), [disableRecording]() mutable {
+            return (tgcalls::SharedAudioDeviceModule *)(new SharedAudioDeviceModuleImpl(disableRecording));
         }));
     }
     return self;
@@ -164,6 +172,12 @@ private:
         [[RTCAudioSession sharedInstance] audioSessionDidDeactivate:[AVAudioSession sharedInstance]];
     }
     [RTCAudioSession sharedInstance].isAudioEnabled = isAudioSessionActive;
+
+    if (isAudioSessionActive) {
+        _audioDeviceModule->perform([](tgcalls::SharedAudioDeviceModule *audioDeviceModule) {
+            audioDeviceModule->start();
+        });
+    }
 }
 
 @end
@@ -1596,6 +1610,8 @@ private:
 
     rtc::scoped_refptr<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS> _currentAudioDeviceModule;
     rtc::Thread *_currentAudioDeviceModuleThread;
+
+    SharedCallAudioDevice * _audioDevice;
 }
 
 @end
@@ -1617,7 +1633,8 @@ private:
     enableNoiseSuppression:(bool)enableNoiseSuppression
     disableAudioInput:(bool)disableAudioInput
     preferX264:(bool)preferX264
-    logPath:(NSString * _Nonnull)logPath {
+    logPath:(NSString * _Nonnull)logPath
+    audioDevice:(SharedCallAudioDevice * _Nullable)audioDevice {
     self = [super init];
     if (self != nil) {
         _queue = queue;
@@ -1629,6 +1646,12 @@ private:
         _networkStateUpdated = [networkStateUpdated copy];
         _videoCapturer = videoCapturer;
 
+        _audioDevice = audioDevice;
+        std::shared_ptr<tgcalls::ThreadLocalObject<tgcalls::SharedAudioDeviceModule>> audioDeviceModule;
+        if (_audioDevice) {
+            audioDeviceModule = [_audioDevice getAudioDeviceModule];
+        }
+
         tgcalls::VideoContentType _videoContentType;
         switch (videoContentType) {
             case OngoingGroupCallVideoContentTypeGeneric: {
@@ -1837,7 +1860,10 @@ private:
                 return std::make_shared<RequestMediaChannelDescriptionTaskImpl>(task);
             },
             .minOutgoingVideoBitrateKbit = minOutgoingVideoBitrateKbit,
-            .createAudioDeviceModule = [weakSelf, queue, disableAudioInput](webrtc::TaskQueueFactory *taskQueueFactory) -> rtc::scoped_refptr<webrtc::AudioDeviceModule> {
+            .createAudioDeviceModule = [weakSelf, queue, disableAudioInput, audioDeviceModule](webrtc::TaskQueueFactory *taskQueueFactory) -> rtc::scoped_refptr<webrtc::AudioDeviceModule> {
+                if (audioDeviceModule) {
+                    return audioDeviceModule->getSyncAssumingSameThread()->audioDeviceModule();
+                } else {
                 rtc::Thread *audioDeviceModuleThread = rtc::Thread::Current();
                 auto resultModule = rtc::make_ref_counted<webrtc::tgcalls_ios_adm::AudioDeviceModuleIOS>(false, disableAudioInput, disableAudioInput ? 2 : 1);
                 [queue dispatch:^{
@@ -1849,6 +1875,7 @@ private:
                 }];
                 return resultModule;
+                }
             }
         }));
     }
     return self;