diff --git a/submodules/MediaPlayer/Sources/FFMpegAudioFrameDecoder.swift b/submodules/MediaPlayer/Sources/FFMpegAudioFrameDecoder.swift
index 8324cb4d8e..c3d0d2b570 100644
--- a/submodules/MediaPlayer/Sources/FFMpegAudioFrameDecoder.swift
+++ b/submodules/MediaPlayer/Sources/FFMpegAudioFrameDecoder.swift
@@ -11,11 +11,29 @@ final class FFMpegAudioFrameDecoder: MediaTrackFrameDecoder {
     
     private var delayedFrames: [MediaTrackFrame] = []
     
-    init(codecContext: FFMpegAVCodecContext) {
+    init(codecContext: FFMpegAVCodecContext, sampleRate: Int = 44100, channelCount: Int = 2) {
         self.codecContext = codecContext
         
         self.audioFrame = FFMpegAVFrame()
-        self.swrContext = FFMpegSWResample(sourceChannelCount: Int(codecContext.channels()), sourceSampleRate: Int(codecContext.sampleRate()), sourceSampleFormat: codecContext.sampleFormat(), destinationChannelCount: 2, destinationSampleRate: 44100, destinationSampleFormat: FFMPEG_AV_SAMPLE_FMT_S16)
+        self.swrContext = FFMpegSWResample(sourceChannelCount: Int(codecContext.channels()), sourceSampleRate: Int(codecContext.sampleRate()), sourceSampleFormat: codecContext.sampleFormat(), destinationChannelCount: channelCount, destinationSampleRate: sampleRate, destinationSampleFormat: FFMPEG_AV_SAMPLE_FMT_S16)
+    }
+    
+    func decodeRaw(frame: MediaTrackDecodableFrame) -> Data? {
+        let status = frame.packet.send(toDecoder: self.codecContext)
+        if status == 0 {
+            let result = self.codecContext.receive(into: self.audioFrame)
+            if case .success = result {
+                guard let data = self.swrContext.resample(self.audioFrame) else {
+                    return nil
+                }
+                
+                return data
+            } else {
+                return nil
+            }
+        } else {
+            return nil
+        }
     }
     
     func decode(frame: MediaTrackDecodableFrame) -> MediaTrackFrame? {
diff --git a/submodules/MediaPlayer/Sources/MediaTrackDecodableFrame.swift b/submodules/MediaPlayer/Sources/MediaTrackDecodableFrame.swift
index 692e8f6484..9c49f4da3b 100644
--- a/submodules/MediaPlayer/Sources/MediaTrackDecodableFrame.swift
+++ b/submodules/MediaPlayer/Sources/MediaTrackDecodableFrame.swift
@@ -23,4 +23,8 @@ public final class MediaTrackDecodableFrame {
 
         self.packet = packet
     }
+    
+    public func copyPacketData() -> Data {
+        return Data(bytes: self.packet.data, count: Int(self.packet.size))
+    }
 }
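Together these two helpers open a raw path beside the existing `MediaTrackFrame` pipeline: `decodeRaw(frame:)` runs the usual send/receive/resample cycle but returns the interleaved 16-bit PCM as plain `Data`, while `copyPacketData()` snapshots the still-encoded packet bytes. A rough usage sketch; the frame array and the helper function itself are assumptions for illustration, not part of this patch:

```swift
// Sketch: drain decodable frames into one PCM buffer.
// Assumes `frames` was obtained elsewhere and the decoder was
// configured for s16 output, so the result is interleaved PCM.
func collectPCM(frames: [MediaTrackDecodableFrame], decoder: FFMpegAudioFrameDecoder) -> Data {
    var pcm = Data()
    for frame in frames {
        if let chunk = decoder.decodeRaw(frame: frame) {
            pcm.append(chunk)
        }
    }
    return pcm
}
```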
diff --git a/submodules/MediaPlayer/Sources/SoftwareVideoSource.swift b/submodules/MediaPlayer/Sources/SoftwareVideoSource.swift
index 1e252012a7..dfb882b54b 100644
--- a/submodules/MediaPlayer/Sources/SoftwareVideoSource.swift
+++ b/submodules/MediaPlayer/Sources/SoftwareVideoSource.swift
@@ -179,7 +179,7 @@ public final class SoftwareVideoSource {
             if endOfStream {
                 break
             } else {
-                if let avFormatContext = self.avFormatContext, let videoStream = self.videoStream {
+                if let _ = self.avFormatContext, let _ = self.videoStream {
                     endOfStream = true
                     break
                 } else {
@@ -254,3 +254,197 @@ public final class SoftwareVideoSource {
         }
     }
 }
+
+private final class SoftwareAudioStream {
+    let index: Int
+    let fps: CMTime
+    let timebase: CMTime
+    let duration: CMTime
+    let decoder: FFMpegAudioFrameDecoder
+    
+    init(index: Int, fps: CMTime, timebase: CMTime, duration: CMTime, decoder: FFMpegAudioFrameDecoder) {
+        self.index = index
+        self.fps = fps
+        self.timebase = timebase
+        self.duration = duration
+        self.decoder = decoder
+    }
+}
+
+public final class SoftwareAudioSource {
+    private var readingError = false
+    private var audioStream: SoftwareAudioStream?
+    private var avIoContext: FFMpegAVIOContext?
+    private var avFormatContext: FFMpegAVFormatContext?
+    private let path: String
+    fileprivate let fd: Int32?
+    fileprivate let size: Int32
+    
+    private var hasReadToEnd: Bool = false
+    
+    public init(path: String) {
+        let _ = FFMpegMediaFrameSourceContextHelpers.registerFFMpegGlobals
+        
+        var s = stat()
+        stat(path, &s)
+        self.size = Int32(s.st_size)
+        
+        let fd = open(path, O_RDONLY, S_IRUSR)
+        if fd >= 0 {
+            self.fd = fd
+        } else {
+            self.fd = nil
+        }
+        
+        self.path = path
+        
+        let avFormatContext = FFMpegAVFormatContext()
+        
+        let ioBufferSize = 64 * 1024
+        
+        let avIoContext = FFMpegAVIOContext(bufferSize: Int32(ioBufferSize), opaqueContext: Unmanaged.passUnretained(self).toOpaque(), readPacket: readPacketCallback, writePacket: nil, seek: seekCallback)
+        self.avIoContext = avIoContext
+        
+        avFormatContext.setIO(self.avIoContext!)
+        
+        if !avFormatContext.openInput() {
+            self.readingError = true
+            return
+        }
+        
+        if !avFormatContext.findStreamInfo() {
+            self.readingError = true
+            return
+        }
+        
+        self.avFormatContext = avFormatContext
+        
+        var audioStream: SoftwareAudioStream?
+        
+        for streamIndexNumber in avFormatContext.streamIndices(for: FFMpegAVFormatStreamTypeAudio) {
+            let streamIndex = streamIndexNumber.int32Value
+            if avFormatContext.isAttachedPic(atStreamIndex: streamIndex) {
+                continue
+            }
+            
+            let codecId = avFormatContext.codecId(atStreamIndex: streamIndex)
+            
+            let fpsAndTimebase = avFormatContext.fpsAndTimebase(forStreamIndex: streamIndex, defaultTimeBase: CMTimeMake(value: 1, timescale: 40000))
+            let (fps, timebase) = (fpsAndTimebase.fps, fpsAndTimebase.timebase)
+            
+            let duration = CMTimeMake(value: avFormatContext.duration(atStreamIndex: streamIndex), timescale: timebase.timescale)
+            
+            let codec = FFMpegAVCodec.find(forId: codecId)
+            
+            if let codec = codec {
+                let codecContext = FFMpegAVCodecContext(codec: codec)
+                if avFormatContext.codecParams(atStreamIndex: streamIndex, to: codecContext) {
+                    if codecContext.open() {
+                        audioStream = SoftwareAudioStream(index: Int(streamIndex), fps: fps, timebase: timebase, duration: duration, decoder: FFMpegAudioFrameDecoder(codecContext: codecContext, sampleRate: 48000, channelCount: 1))
+                        break
+                    }
+                }
+            }
+        }
+        
+        self.audioStream = audioStream
+        
+        if let audioStream = self.audioStream {
+            avFormatContext.seekFrame(forStreamIndex: Int32(audioStream.index), pts: 0, positionOnKeyframe: false)
+        }
+    }
+    
+    deinit {
+        if let fd = self.fd {
+            close(fd)
+        }
+    }
+    
+    private func readPacketInternal() -> FFMpegPacket? {
+        guard let avFormatContext = self.avFormatContext else {
+            return nil
+        }
+        
+        let packet = FFMpegPacket()
+        if avFormatContext.readFrame(into: packet) {
+            return packet
+        } else {
+            return nil
+        }
+    }
+    
+    func readDecodableFrame() -> (MediaTrackDecodableFrame?, Bool) {
+        var frames: [MediaTrackDecodableFrame] = []
+        var endOfStream = false
+        
+        while !self.readingError && frames.isEmpty {
+            if let packet = self.readPacketInternal() {
+                if let audioStream = audioStream, Int(packet.streamIndex) == audioStream.index {
+                    let packetPts = packet.pts
+                    
+                    let pts = CMTimeMake(value: packetPts, timescale: audioStream.timebase.timescale)
+                    let dts = CMTimeMake(value: packet.dts, timescale: audioStream.timebase.timescale)
+                    
+                    let duration: CMTime
+                    
+                    let frameDuration = packet.duration
+                    if frameDuration != 0 {
+                        duration = CMTimeMake(value: frameDuration * audioStream.timebase.value, timescale: audioStream.timebase.timescale)
+                    } else {
+                        duration = audioStream.fps
+                    }
+                    
+                    let frame = MediaTrackDecodableFrame(type: .audio, packet: packet, pts: pts, dts: dts, duration: duration)
+                    frames.append(frame)
+                }
+            } else {
+                endOfStream = true
+                break
+            }
+        }
+        
+        return (frames.first, endOfStream)
+    }
+    
+    public func readFrame() -> Data? {
+        guard let audioStream = self.audioStream, let _ = self.avFormatContext else {
+            return nil
+        }
+        
+        let (decodableFrame, _) = self.readDecodableFrame()
+        if let decodableFrame = decodableFrame {
+            return audioStream.decoder.decodeRaw(frame: decodableFrame)
+        } else {
+            return nil
+        }
+    }
+    
+    public func readEncodedFrame() -> (Data, Int)? {
+        guard let _ = self.audioStream, let _ = self.avFormatContext else {
+            return nil
+        }
+        
+        let (decodableFrame, _) = self.readDecodableFrame()
+        if let decodableFrame = decodableFrame {
+            return (decodableFrame.copyPacketData(), Int(decodableFrame.packet.duration - max(0, -decodableFrame.packet.pts)))
+        } else {
+            return nil
+        }
+    }
+    
+    public func seek(timestamp: Double) {
+        if let stream = self.audioStream, let avFormatContext = self.avFormatContext {
+            let pts = CMTimeMakeWithSeconds(timestamp, preferredTimescale: stream.timebase.timescale)
+            avFormatContext.seekFrame(forStreamIndex: Int32(stream.index), pts: pts.value, positionOnKeyframe: false)
+        }
+    }
+}
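`SoftwareAudioSource` mirrors `SoftwareVideoSource`, but opens the first non-attached-picture audio stream and configures the decoder for 48 kHz mono s16 output. A hedged sketch of the intended read loop; the file path is a placeholder, not a path from this patch:

```swift
// Sketch: pull all PCM out of a file, then rewind.
let source = SoftwareAudioSource(path: "/path/to/clip.ogg")
var totalBytes = 0
while let pcm = source.readFrame() {
    totalBytes += pcm.count // s16 mono: 2 bytes per sample
}
let seconds = Double(totalBytes / 2) / 48_000.0
print("decoded \(seconds)s of audio")
source.seek(timestamp: 0.0)
```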
diff --git a/submodules/OpusBinding/PublicHeaders/OpusBinding/OggOpusReader.h b/submodules/OpusBinding/PublicHeaders/OpusBinding/OggOpusReader.h
index 2bfe4c5438..383b0b94e1 100644
--- a/submodules/OpusBinding/PublicHeaders/OpusBinding/OggOpusReader.h
+++ b/submodules/OpusBinding/PublicHeaders/OpusBinding/OggOpusReader.h
@@ -2,12 +2,21 @@
 
 NS_ASSUME_NONNULL_BEGIN
 
+@interface OggOpusFrame : NSObject
+
+@property (nonatomic, readonly) int numSamples;
+@property (nonatomic, strong, readonly) NSData *data;
+
+@end
+
 @interface OggOpusReader : NSObject
 
 - (instancetype _Nullable)initWithPath:(NSString *)path;
 
 - (int32_t)read:(void *)pcmData bufSize:(int)bufSize;
 
++ (NSArray<OggOpusFrame *> * _Nullable)extractFrames:(NSData *)data;
+
 @end
 
 NS_ASSUME_NONNULL_END
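`extractFrames:` takes a whole Ogg Opus file image in memory and returns one `OggOpusFrame` per Opus packet, tagged with the packet's duration in samples at 48 kHz. From Swift the call would look roughly like this (the path is a placeholder):

```swift
// Sketch: split an in-memory .ogg file into Opus packets.
if let data = try? Data(contentsOf: URL(fileURLWithPath: "/path/to/clip.ogg")),
   let frames = OggOpusReader.extractFrames(data) {
    for frame in frames {
        // numSamples is the packet duration at 48 kHz (e.g. 960 = 20 ms)
        print("packet: \(frame.data.count) bytes, \(frame.numSamples) samples")
    }
}
```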
diff --git a/submodules/OpusBinding/Sources/OggOpusReader.m b/submodules/OpusBinding/Sources/OggOpusReader.m
index 5e8bd5af53..3df05d1b43 100644
--- a/submodules/OpusBinding/Sources/OggOpusReader.m
+++ b/submodules/OpusBinding/Sources/OggOpusReader.m
@@ -2,6 +2,37 @@
 
 #import "opusfile/opusfile.h"
 
+static int is_opus(ogg_page *og) {
+    ogg_stream_state os;
+    ogg_packet op;
+
+    ogg_stream_init(&os, ogg_page_serialno(og));
+    ogg_stream_pagein(&os, og);
+    if (ogg_stream_packetout(&os, &op) == 1)
+    {
+        if (op.bytes >= 19 && !memcmp(op.packet, "OpusHead", 8))
+        {
+            ogg_stream_clear(&os);
+            return 1;
+        }
+    }
+    ogg_stream_clear(&os);
+    return 0;
+}
+
+@implementation OggOpusFrame
+
+- (instancetype)initWithNumSamples:(int)numSamples data:(NSData *)data {
+    self = [super init];
+    if (self != nil) {
+        _numSamples = numSamples;
+        _data = data;
+    }
+    return self;
+}
+
+@end
+
 @interface OggOpusReader () {
     OggOpusFile *_opusFile;
 }
@@ -32,4 +63,132 @@
     return op_read(_opusFile, pcmData, bufSize, NULL);
 }
 
++ (NSArray<OggOpusFrame *> * _Nullable)extractFrames:(NSData *)data {
+    NSMutableArray *result = [[NSMutableArray alloc] init];
+
+    ogg_page opage;
+    ogg_packet opacket;
+    ogg_sync_state ostate;
+    ogg_stream_state ostream;
+    int sampleRate = 48000;
+
+    if (ogg_sync_init(&ostate) < 0) {
+        return nil;
+    }
+
+    char *obuffer;
+    long obufferSize = (long)data.length;
+
+    obuffer = ogg_sync_buffer(&ostate, obufferSize);
+    if (!obuffer) {
+        ogg_sync_clear(&ostate);
+        return nil;
+    }
+
+    memcpy(obuffer, data.bytes, data.length);
+    // Tell the ogg_sync_state how many bytes were written into its buffer.
+    if (ogg_sync_wrote(&ostate, obufferSize) < 0) {
+        ogg_sync_clear(&ostate);
+        return nil;
+    }
+
+    int pages = 0;
+    int packetsout = 0;
+    int invalid = 0;
+    int eos = 0;
+
+    int headers = 0;
+    int serialno = 0;
+    int streamInitialized = 0;
+
+    /* LOOP START */
+    while (ogg_sync_pageout(&ostate, &opage) == 1) {
+        pages++;
+
+        if (headers == 0) {
+            if (is_opus(&opage)) {
+                /* this is the start of an Opus stream */
+                serialno = ogg_page_serialno(&opage);
+                if (ogg_stream_init(&ostream, serialno) < 0) {
+                    ogg_sync_clear(&ostate);
+                    return nil;
+                }
+                streamInitialized = 1;
+
+                headers++;
+            } else if (!ogg_page_bos(&opage)) {
+                // We're past the header and haven't found an Opus stream.
+                // Time to give up.
+                break;
+            } else {
+                /* try again */
+                continue;
+            }
+        }
+
+        eos = ogg_page_eos(&opage);
+
+        /* submit the page for packetization */
+        if (ogg_stream_pagein(&ostream, &opage) < 0) {
+            ogg_stream_clear(&ostream);
+            ogg_sync_clear(&ostate);
+            return nil;
+        }
+
+        /* read and process available packets */
+        while (ogg_stream_packetout(&ostream, &opacket) == 1) {
+            packetsout++;
+
+            int samples;
+            /* skip header packets */
+            if (headers == 1 && opacket.bytes >= 19 && !memcmp(opacket.packet, "OpusHead", 8)) {
+                headers++;
+                continue;
+            }
+            if (headers == 2 && opacket.bytes >= 16 && !memcmp(opacket.packet, "OpusTags", 8)) {
+                headers++;
+                continue;
+            }
+
+            /* get packet duration */
+            samples = opus_packet_get_nb_samples(opacket.packet, opacket.bytes, sampleRate);
+            if (samples <= 0) {
+                invalid++;
+                continue; // skipping invalid packet
+            }
+
+            [result addObject:[[OggOpusFrame alloc] initWithNumSamples:samples data:[NSData dataWithBytes:opacket.packet length:opacket.bytes]]];
+
+            /* update the rtp header and send */
+            /*this->rtp.header_size = 12 + 4 * this->rtp.cc;
+            this->rtp.seq++;
+            this->rtp.time += samples;
+            this->rtp.payload_size = opacket.bytes;
+
+            // Create RTP Packet
+            unsigned char *packet;
+            size_t packetSize = this->rtp.header_size + this->rtp.payload_size;
+            packet = (unsigned char *)malloc(packetSize);
+            if (!packet)
+                throw Napi::Error::New(info.Env(), "Couldn't allocate packet buffer.");
+
+            // Serialize header and copy to packet. Then copy payload to packet.
+            serialize_rtp_header(packet, this->rtp.header_size, &this->rtp);
+            memcpy(packet + this->rtp.header_size, opacket.packet, opacket.bytes);
+
+            Napi::Buffer output = Napi::Buffer::Copy(env, reinterpret_cast(packet), packetSize);
+
+            push.Call(thisObj, {output});*/
+        }
+
+        if (eos > 0) {
+            // End of the logical bitstream; reset so a chained stream can be picked up.
+            ogg_stream_clear(&ostream);
+            streamInitialized = 0;
+            headers = 0;
+        }
+    }
+
+    /* CLEAN UP */
+    if (streamInitialized) {
+        ogg_stream_clear(&ostream);
+    }
+    ogg_sync_clear(&ostate);
+
+    return result;
+}
+
 @end
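Since every `OggOpusFrame` carries its sample count, the total stream duration is a simple fold. A small sketch; `totalDuration(of:)` is a hypothetical helper, not something this patch adds:

```swift
// Sketch: total duration of extracted Opus packets at 48 kHz.
func totalDuration(of frames: [OggOpusFrame]) -> Double {
    let samples = frames.reduce(0) { $0 + Int($1.numSamples) }
    return Double(samples) / 48_000.0
}
```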
diff --git a/submodules/PhotoResources/Sources/PhotoResources.swift b/submodules/PhotoResources/Sources/PhotoResources.swift
index 9e195e9130..91ec1a3454 100644
--- a/submodules/PhotoResources/Sources/PhotoResources.swift
+++ b/submodules/PhotoResources/Sources/PhotoResources.swift
@@ -100,6 +100,10 @@ public func chatMessagePhotoDatas(postbox: Postbox, photoReference: ImageMediaRe
                 switch results[i].0 {
                 case .image:
                     if let data = results[i].1, data.count != 0 {
+                        if Int(fullRepresentationSize.width) > 100 && i <= 1 && !isLastSize {
+                            continue
+                        }
+                        
                         subscriber.putNext(Tuple4(nil, data, .full, isLastSize))
                         foundData = true
                         if isLastSize {
@@ -623,6 +627,11 @@ public func chatMessagePhotoInternal(photoData: Signal
diff --git a/submodules/TelegramVoip/Sources/GroupCallContext.swift b/submodules/TelegramVoip/Sources/GroupCallContext.swift
--- a/submodules/TelegramVoip/Sources/GroupCallContext.swift
+++ b/submodules/TelegramVoip/Sources/GroupCallContext.swift
+private final class DemoBroadcastPacketSource {
+    private let queue: Queue
+    
+    private let packetsPipe = ValuePipe<[OngoingGroupCallBroadcastPacket]>()
+    var packets: Signal<[OngoingGroupCallBroadcastPacket], NoError> {
+        return self.packetsPipe.signal()
+    }
+    
+    private var timer: SwiftSignalKit.Timer?
+    
+    private var enqueuedPackets: [OngoingGroupCallBroadcastPacket] = []
+    private var delayTimer: SwiftSignalKit.Timer?
+    
+    private var nextIndex: Int = 0
+    
+    init(queue: Queue) {
+        self.queue = queue
+        
+        self.emitPacketAndStartTimer()
+    }
+    
+    deinit {
+        self.timer?.invalidate()
+        self.delayTimer?.invalidate()
+    }
+    
+    private func emitPacketAndStartTimer() {
+        let demoPacketCount = 200
+        let index = self.nextIndex % demoPacketCount
+        self.nextIndex += 1
+        
+        var packets: [OngoingGroupCallBroadcastPacket] = []
+        
+        let fileName = String(format: "%04d", index)
+        if let path = getAppBundle().path(forResource: fileName, ofType: "ogg") {
+            let source = SoftwareAudioSource(path: path)
+            while true {
+                if let frame = source.readFrame() {
+                    packets.append(OngoingGroupCallBroadcastPacket(numSamples: Int32(frame.count / 2), data: frame))
+                } else {
+                    break
+                }
+            }
+        }
+        
+        if !packets.isEmpty {
+            self.enqueuedPackets.append(contentsOf: packets)
+            self.startDelayTimer()
+        }
+        
+        let timer = SwiftSignalKit.Timer(timeout: 1.0, repeat: false, completion: { [weak self] in
+            self?.emitPacketAndStartTimer()
+        }, queue: self.queue)
+        self.timer = timer
+        timer.start()
+    }
+    
+    private func startDelayTimer() {
+        let delayTimer = SwiftSignalKit.Timer(timeout: Double.random(in: 0.1 ... 0.3), repeat: false, completion: { [weak self] in
+            guard let strongSelf = self else {
+                return
+            }
+            let packets = strongSelf.enqueuedPackets
+            strongSelf.enqueuedPackets.removeAll()
+            if !packets.isEmpty {
+                strongSelf.packetsPipe.putNext(packets)
+            }
+        }, queue: self.queue)
+        self.delayTimer = delayTimer
+        delayTimer.start()
+    }
+}
+
 public final class OngoingGroupCallContext {
     public enum NetworkState {
         case connecting
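`DemoBroadcastPacketSource` fakes a broadcast feed: once per second it decodes the next numbered `.ogg` bundle resource into PCM packets, then flushes the batch after a random 0.1 to 0.3 s delay to imitate network jitter. Consumers subscribe to `packets`; the sketch below mirrors the wiring `OngoingGroupCallContext` does further down (the class is file-private, so this only works inside the same file):

```swift
// Sketch: observe the demo packet feed on its queue.
let queue = Queue()
let source = DemoBroadcastPacketSource(queue: queue)
let disposable = (source.packets
|> deliverOn(queue)).start(next: { packets in
    let samples = packets.reduce(0) { $0 + Int($1.numSamples) }
    print("batch: \(packets.count) packets, \(samples) samples")
})
// Later: disposable.dispose()
```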
@@ -50,12 +126,14 @@ public final class OngoingGroupCallContext {
         
         let videoSources = ValuePromise<Set<UInt32>>(Set(), ignoreRepeated: true)
         
+        private var broadcastPacketSource: DemoBroadcastPacketSource?
+        private var broadcastPacketsDisposable: Disposable?
+        
         init(queue: Queue, inputDeviceId: String, outputDeviceId: String, video: OngoingCallVideoCapturer?, participantDescriptionsRequired: @escaping (Set<UInt32>) -> Void) {
             self.queue = queue
             
             var networkStateUpdatedImpl: ((GroupCallNetworkState) -> Void)?
             var audioLevelsUpdatedImpl: (([NSNumber]) -> Void)?
-            var participantDescriptionsRequiredImpl: (([NSNumber]) -> Void)?
             
             let videoSources = self.videoSources
             self.context = GroupCallThreadLocalContext(
@@ -125,6 +203,53 @@
                     strongSelf.joinPayload.set(.single((payload, ssrc)))
                 }
             })
+            
+            let broadcastPacketSource = DemoBroadcastPacketSource(queue: queue)
+            self.broadcastPacketSource = broadcastPacketSource
+            self.broadcastPacketsDisposable = (broadcastPacketSource.packets
+            |> deliverOn(queue)).start(next: { [weak self] packets in
+                guard let strongSelf = self else {
+                    return
+                }
+                strongSelf.context.add(packets)
+            })
+            
+            /*var packets: [OngoingGroupCallBroadcastPacket] = []
+            for i in 0 ..< 200 {
+                let fileName = String(format: "%04d", i)
+                if let path = getAppBundle().path(forResource: fileName, ofType: "ogg") {
+                    if let data = try? Data(contentsOf: URL(fileURLWithPath: path)) {
+                        if let frames = OggOpusReader.extractFrames(data) {
+                            for frame in frames {
+                                packets.append(OngoingGroupCallBroadcastPacket(numSamples: frame.numSamples, data: frame.data))
+                            }
+                        }
+                    }
+                    continue
+                    
+                    let source = SoftwareAudioSource(path: path)
+                    while true {
+                        if let (frame, numSamples) = source.readEncodedFrame() {
+                            if numSamples != 960 {
+                                continue
+                            }
+                            packets.append(OngoingGroupCallBroadcastPacket(numSamples: Int32(numSamples), data: frame))
+                        } else {
+                            break
+                        }
+                        /*if let frame = source.readFrame() {
+                            packets.append(frame)
+                        } else {
+                            break
+                        }*/
+                    }
+                }
+            }
+            context.add(packets);*/
         }
+        
+        deinit {
+            self.broadcastPacketsDisposable?.dispose()
+        }
         
         func setJoinResponse(payload: String, participants: [(UInt32, String?)]) {
diff --git a/submodules/TgVoipWebrtc/PublicHeaders/TgVoipWebrtc/OngoingCallThreadLocalContext.h b/submodules/TgVoipWebrtc/PublicHeaders/TgVoipWebrtc/OngoingCallThreadLocalContext.h
index 4db0db1d26..1b9ec8dd23 100644
--- a/submodules/TgVoipWebrtc/PublicHeaders/TgVoipWebrtc/OngoingCallThreadLocalContext.h
+++ b/submodules/TgVoipWebrtc/PublicHeaders/TgVoipWebrtc/OngoingCallThreadLocalContext.h
@@ -165,6 +165,15 @@ typedef NS_ENUM(int32_t, GroupCallNetworkState) {
 
 @end
 
+@interface OngoingGroupCallBroadcastPacket : NSObject
+
+@property (nonatomic, readonly) int numSamples;
+@property (nonatomic, strong, readonly) NSData * _Nonnull data;
+
+- (instancetype _Nonnull)initWithNumSamples:(int)numSamples data:(NSData * _Nonnull)data;
+
+@end
+
 @interface GroupCallThreadLocalContext : NSObject
 
 - (instancetype _Nonnull)initWithQueue:(id<OngoingCallThreadLocalContextQueueWebrtc> _Nonnull)queue networkStateUpdated:(void (^ _Nonnull)(GroupCallNetworkState))networkStateUpdated audioLevelsUpdated:(void (^ _Nonnull)(NSArray<NSNumber *> * _Nonnull))audioLevelsUpdated inputDeviceId:(NSString * _Nonnull)inputDeviceId outputDeviceId:(NSString * _Nonnull)outputDeviceId videoCapturer:(OngoingCallThreadLocalContextVideoCapturer * _Nullable)videoCapturer incomingVideoSourcesUpdated:(void (^ _Nonnull)(NSArray<NSNumber *> * _Nonnull))incomingVideoSourcesUpdated participantDescriptionsRequired:(void (^ _Nonnull)(NSArray<NSNumber *> * _Nonnull))participantDescriptionsRequired;
@@ -186,6 +195,8 @@
 
 - (void)switchAudioInput:(NSString * _Nonnull)deviceId;
 - (void)makeIncomingVideoViewWithSsrc:(uint32_t)ssrc completion:(void (^_Nonnull)(UIView * _Nullable))completion;
 
+- (void)addBroadcastPackets:(NSArray<OngoingGroupCallBroadcastPacket *> * _Nonnull)packets;
+
 @end
 
 #endif
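`OngoingGroupCallBroadcastPacket` pairs raw audio bytes with their sample count; for s16 mono PCM that is bytes / 2, which is exactly what the Swift demo source computes. A sketch of a hypothetical convenience wrapper (not part of this patch):

```swift
// Sketch: wrap a chunk of 48 kHz s16 mono PCM as a broadcast packet.
func makePacket(pcm: Data) -> OngoingGroupCallBroadcastPacket {
    return OngoingGroupCallBroadcastPacket(numSamples: Int32(pcm.count / 2), data: pcm)
}
```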
diff --git a/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm b/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
index 564fef27db..05d498f598 100644
--- a/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
+++ b/submodules/TgVoipWebrtc/Sources/OngoingCallThreadLocalContext.mm
@@ -1398,6 +1398,22 @@ static void processJoinPayload(tgcalls::GroupJoinPayload &payload, void (^ _Nonn
     }
 }
 
+- (void)addBroadcastPackets:(NSArray<OngoingGroupCallBroadcastPacket *> * _Nonnull)packets {
+    if (!_instance) {
+        return;
+    }
+    
+    std::vector<tgcalls::BroadcastPacket> parsedPackets;
+    for (OngoingGroupCallBroadcastPacket *packet in packets) {
+        tgcalls::BroadcastPacket parsedPacket;
+        parsedPacket.numSamples = packet.numSamples;
+        parsedPacket.data.resize(packet.data.length);
+        [packet.data getBytes:parsedPacket.data.data() length:packet.data.length];
+        parsedPackets.push_back(std::move(parsedPacket));
+    }
+    ((tgcalls::GroupInstanceCustomImpl *)(_instance.get()))->addBroadcastPackets(std::move(parsedPackets));
+}
+
 @end
 
 @implementation OngoingGroupCallParticipantDescription
@@ -1412,3 +1428,16 @@
 }
 
 @end
+
+@implementation OngoingGroupCallBroadcastPacket
+
+- (instancetype _Nonnull)initWithNumSamples:(int)numSamples data:(NSData * _Nonnull)data {
+    self = [super init];
+    if (self != nil) {
+        _numSamples = numSamples;
+        _data = data;
+    }
+    return self;
+}
+
+@end
diff --git a/submodules/TgVoipWebrtc/tgcalls b/submodules/TgVoipWebrtc/tgcalls
index d2b15b9edf..35a4725bd3 160000
--- a/submodules/TgVoipWebrtc/tgcalls
+++ b/submodules/TgVoipWebrtc/tgcalls
@@ -1 +1 @@
-Subproject commit d2b15b9edfe2eeaaa9862adb0ae38f8e6308b5fb
+Subproject commit 35a4725bd37e4e0a07eb2d3ba626b80f9dc73fce
diff --git a/third-party/webrtc/webrtc b/third-party/webrtc/webrtc
index e42b463011..1829cfe837 160000
--- a/third-party/webrtc/webrtc
+++ b/third-party/webrtc/webrtc
@@ -1 +1 @@
-Subproject commit e42b4630117498504692fbac80ec6dbb970b2314
+Subproject commit 1829cfe8371d479f254d9783ebe66fc562d30dba
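End to end, the demo path is: bundled `.ogg` file, `SoftwareAudioSource` PCM, `OngoingGroupCallBroadcastPacket` batches, `GroupCallThreadLocalContext`, tgcalls. A compressed sketch of that flow; the resource name is a placeholder, and the Swift spelling `addBroadcastPackets(_:)` for `-addBroadcastPackets:` is an assumption about how the selector imports:

```swift
// Sketch: push one bundled ogg file into an existing group call context.
func feedDemoAudio(context: GroupCallThreadLocalContext) {
    guard let path = getAppBundle().path(forResource: "0000", ofType: "ogg") else {
        return
    }
    let source = SoftwareAudioSource(path: path)
    var packets: [OngoingGroupCallBroadcastPacket] = []
    while let pcm = source.readFrame() {
        // 48 kHz s16 mono: two bytes per sample
        packets.append(OngoingGroupCallBroadcastPacket(numSamples: Int32(pcm.count / 2), data: pcm))
    }
    context.addBroadcastPackets(packets)
}
```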