ASThread: Remove Locker, Unlocker, and SharedMutex (#1213)
* ASThread: Remove Locker, Unlocker, and SharedMutex
* Remove extra line
* Kick the CI
* Move C++ down
* Fix thing
@@ -416,12 +416,13 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
 - (void)onDidLoad:(ASDisplayNodeDidLoadBlock)body
 {
-  ASDN::MutexLocker l(__instanceLock__);
+  ASDN::UniqueLock l(__instanceLock__);
 
   if ([self _locked_isNodeLoaded]) {
     ASDisplayNodeAssertThreadAffinity(self);
-    ASDN::MutexUnlocker l(__instanceLock__);
+    l.unlock();
     body(self);
     return;
   } else if (_onDidLoadBlocks == nil) {
     _onDidLoadBlocks = [NSMutableArray arrayWithObject:body];
   } else {
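The pattern throughout this diff is the same: the scope-based ASDN::MutexLocker / MutexUnlocker guards are replaced by ASDN::UniqueLock (a std::unique_lock typedef added in the ASThread.h hunk near the end), and code that must temporarily drop the lock now calls unlock()/lock() explicitly instead of opening a nested Unlocker scope. A minimal standalone C++ sketch of that pattern, using plain std::mutex rather than ASDN::Mutex and a hypothetical doWork() in place of the Texture methods:

#include <mutex>

std::mutex instanceLock;

void doWork() {
  // std::unique_lock releases the mutex in its destructor, like the old
  // scope-based Locker, but also allows explicit unlock()/lock() in between.
  std::unique_lock<std::mutex> l(instanceLock);

  // ... work that requires the lock ...

  l.unlock();   // replaces the nested MutexUnlocker scope
  // ... call out to code that may take the lock itself ...
  l.lock();     // re-acquire before touching protected state again

  // ... more locked work; the destructor unlocks at end of scope ...
}

The intent is behavior parity; only the guard type and the explicit unlock/relock calls change.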
@@ -601,7 +602,7 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
 - (UIView *)view
 {
-  ASDN::MutexLocker l(__instanceLock__);
+  ASDN::UniqueLock l(__instanceLock__);
 
   ASDisplayNodeAssert(!_flags.layerBacked, @"Call to -view undefined on layer-backed nodes");
   BOOL isLayerBacked = _flags.layerBacked;
@@ -626,30 +627,28 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
   // in the background on a loaded node, which isn't currently supported.
   if (_pendingViewState.hasSetNeedsLayout) {
     // Need to unlock before calling setNeedsLayout to avoid deadlocks.
-    // MutexUnlocker will re-lock at the end of scope.
-    ASDN::MutexUnlocker u(__instanceLock__);
+    l.unlock();
     [self __setNeedsLayout];
+    l.lock();
   }
 
   [self _locked_applyPendingStateToViewOrLayer];
 
-  {
-    // The following methods should not be called with a lock
-    ASDN::MutexUnlocker u(__instanceLock__);
+  // The following methods should not be called with a lock
+  l.unlock();
 
-    // No need for the lock as accessing the subviews or layers are always happening on main
-    [self _addSubnodeViewsAndLayers];
-
-    // A subclass hook should never be called with a lock
-    [self _didLoad];
-  }
+  // No need for the lock as accessing the subviews or layers are always happening on main
+  [self _addSubnodeViewsAndLayers];
+
+  // A subclass hook should never be called with a lock
+  [self _didLoad];
 
   return _view;
 }
 
 - (CALayer *)layer
 {
-  ASDN::MutexLocker l(__instanceLock__);
+  ASDN::UniqueLock l(__instanceLock__);
   if (_layer != nil) {
     return _layer;
   }
@@ -661,31 +660,30 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
   // Loading a layer needs to happen on the main thread
   ASDisplayNodeAssertMainThread();
   [self _locked_loadViewOrLayer];
+  CALayer *layer = _layer;
 
   // FIXME: Ideally we'd call this as soon as the node receives -setNeedsLayout
   // but automatic subnode management would require us to modify the node tree
   // in the background on a loaded node, which isn't currently supported.
   if (_pendingViewState.hasSetNeedsLayout) {
     // Need to unlock before calling setNeedsLayout to avoid deadlocks.
-    // MutexUnlocker will re-lock at the end of scope.
-    ASDN::MutexUnlocker u(__instanceLock__);
+    l.unlock();
     [self __setNeedsLayout];
+    l.lock();
   }
 
   [self _locked_applyPendingStateToViewOrLayer];
 
-  {
-    // The following methods should not be called with a lock
-    ASDN::MutexUnlocker u(__instanceLock__);
+  // The following methods should not be called with a lock
+  l.unlock();
 
-    // No need for the lock as accessing the subviews or layers are always happening on main
-    [self _addSubnodeViewsAndLayers];
-
-    // A subclass hook should never be called with a lock
-    [self _didLoad];
-  }
+  // No need for the lock as accessing the subviews or layers are always happening on main
+  [self _addSubnodeViewsAndLayers];
+
+  // A subclass hook should never be called with a lock
+  [self _didLoad];
 
-  return _layer;
+  return layer;
 }
 
 // Returns nil if the layer is not an _ASDisplayLayer; will not create the layer if nil.
@@ -1033,7 +1031,7 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
 
   BOOL loaded = NO;
   {
-    ASDN::MutexLocker l(__instanceLock__);
+    ASDN::UniqueLock l(__instanceLock__);
     loaded = [self _locked_isNodeLoaded];
     CGRect bounds = _threadSafeBounds;
 
@@ -1055,10 +1053,9 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
 
     // This method will confirm that the layout is up to date (and update if needed).
     // Importantly, it will also APPLY the layout to all of our subnodes if (unless parent is transitioning).
-    {
-      ASDN::MutexUnlocker u(__instanceLock__);
-      [self _u_measureNodeWithBoundsIfNecessary:bounds];
-    }
+    l.unlock();
+    [self _u_measureNodeWithBoundsIfNecessary:bounds];
+    l.lock();
 
     [self _locked_layoutPlaceholderIfNecessary];
   }
@@ -1121,7 +1118,7 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
 {
   __ASDisplayNodeCheckForLayoutMethodOverrides;
 
-  ASDN::MutexLocker l(__instanceLock__);
+  ASDN::UniqueLock l(__instanceLock__);
 
 #if YOGA
   // There are several cases where Yoga could arrive here:
@@ -1138,11 +1135,13 @@ ASSynthesizeLockingMethodsWithMutex(__instanceLock__);
   if ([self shouldHaveYogaMeasureFunc] == NO) {
     // If we're a yoga root, tree node, or leaf with no measure func (e.g. spacer), then
     // initiate a new Yoga calculation pass from root.
-    ASDN::MutexUnlocker ul(__instanceLock__);
 
     as_activity_create_for_scope("Yoga layout calculation");
     if (self.yogaLayoutInProgress == NO) {
       ASYogaLog("Calculating yoga layout from root %@, %@", self, NSStringFromASSizeRange(constrainedSize));
+      l.unlock();
       [self calculateLayoutFromYogaRoot:constrainedSize];
+      l.lock();
     } else {
       ASYogaLog("Reusing existing yoga layout %@", _yogaCalculatedLayout);
     }
@@ -3546,15 +3545,15 @@ ASDISPLAYNODE_INLINE BOOL subtreeIsRasterized(ASDisplayNode *node) {
   ASDisplayNodeAssertMainThread();
   ASAssertUnlocked(__instanceLock__);
 
-  ASDN::MutexLocker l(__instanceLock__);
+  ASDN::UniqueLock l(__instanceLock__);
   // FIXME: Ideally we'd call this as soon as the node receives -setNeedsLayout
   // but automatic subnode management would require us to modify the node tree
   // in the background on a loaded node, which isn't currently supported.
   if (_pendingViewState.hasSetNeedsLayout) {
     // Need to unlock before calling setNeedsLayout to avoid deadlocks.
-    // MutexUnlocker will re-lock at the end of scope.
-    ASDN::MutexUnlocker u(__instanceLock__);
+    l.unlock();
     [self __setNeedsLayout];
+    l.lock();
   }
 
   [self _locked_applyPendingViewState];
 
@@ -434,12 +434,12 @@ typedef void (^ASImageNodeDrawParametersBlock)(ASWeakMapEntry *entry);
 
 static ASWeakMap<ASImageNodeContentsKey *, UIImage *> *cache = nil;
 // Allocate cacheLock on the heap to prevent destruction at app exit (https://github.com/TextureGroup/Texture/issues/136)
-static ASDN::StaticMutex& cacheLock = *new ASDN::StaticMutex;
+static auto *cacheLock = new ASDN::Mutex;
 
 + (ASWeakMapEntry *)contentsForkey:(ASImageNodeContentsKey *)key drawParameters:(id)drawParameters isCancelled:(asdisplaynode_iscancelled_block_t)isCancelled
 {
   {
-    ASDN::StaticMutexLocker l(cacheLock);
+    ASDN::MutexLocker l(*cacheLock);
     if (!cache) {
       cache = [[ASWeakMap alloc] init];
     }
@@ -456,7 +456,7 @@ static ASDN::StaticMutex& cacheLock = *new ASDN::StaticMutex;
   }
 
   {
-    ASDN::StaticMutexLocker l(cacheLock);
+    ASDN::MutexLocker l(*cacheLock);
     return [cache setObject:contents forKey:key];
   }
 }
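With StaticMutex gone, these file-scope locks become heap-allocated ASDN::Mutex instances so that no destructor runs during static teardown at process exit (the concern tracked in TextureGroup/Texture#136). A small sketch of the same idiom using the standard library, assuming the mutex is intentionally leaked and the cache types are hypothetical:

#include <mutex>
#include <string>
#include <unordered_map>

// Intentionally leaked: the pointer is never deleted, so no mutex destructor
// can run (and misbehave) while other static objects are being torn down.
static auto *cacheLock = new std::mutex;
static auto *cache = new std::unordered_map<std::string, int>;

int cachedValue(const std::string &key) {
  std::lock_guard<std::mutex> l(*cacheLock);  // note the dereference, as in ASDN::MutexLocker l(*cacheLock)
  return (*cache)[key];
}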
@@ -55,14 +55,14 @@
  */
 static NS_RETURNS_RETAINED ASTextLayout *ASTextNodeCompatibleLayoutWithContainerAndText(ASTextContainer *container, NSAttributedString *text) {
   // Allocate layoutCacheLock on the heap to prevent destruction at app exit (https://github.com/TextureGroup/Texture/issues/136)
-  static ASDN::StaticMutex& layoutCacheLock = *new ASDN::StaticMutex;
+  static auto *layoutCacheLock = new ASDN::Mutex;
   static NSCache<NSAttributedString *, ASTextCacheValue *> *textLayoutCache;
   static dispatch_once_t onceToken;
   dispatch_once(&onceToken, ^{
     textLayoutCache = [[NSCache alloc] init];
   });
 
-  layoutCacheLock.lock();
+  layoutCacheLock->lock();
 
   ASTextCacheValue *cacheValue = [textLayoutCache objectForKey:text];
   if (cacheValue == nil) {
@@ -72,7 +72,7 @@ static NS_RETURNS_RETAINED ASTextLayout *ASTextNodeCompatibleLayoutWithContainer
 
   // Lock the cache item for the rest of the method. Only after acquiring can we release the NSCache.
   ASDN::MutexLocker lock(cacheValue->_m);
-  layoutCacheLock.unlock();
+  layoutCacheLock->unlock();
 
   CGRect containerBounds = (CGRect){ .size = container.size };
   {
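The text-layout cache keeps its lock-ordering scheme here: take the global layoutCacheLock, acquire the per-entry mutex, then release the global lock before doing the expensive layout work under the entry lock only. A hedged C++ sketch of that hand-off, with hypothetical names standing in for the cache types:

#include <mutex>

struct CacheValue {
  std::mutex m;          // per-entry lock, held while this entry is (re)computed
  // ... cached layout data ...
};

std::mutex cacheLock;    // guards the cache container itself

void computeEntry(CacheValue &value) {
  cacheLock.lock();                            // 1. protect lookup/insertion in the container
  std::lock_guard<std::mutex> entry(value.m);  // 2. pin this entry before letting others in
  cacheLock.unlock();                          // 3. other keys can proceed while we work
  // ... expensive per-entry work, protected by value.m only ...
}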
@@ -10,6 +10,7 @@
 #import <AVFoundation/AVFoundation.h>
 #import <AsyncDisplayKit/ASDisplayNode+FrameworkPrivate.h>
 #import <AsyncDisplayKit/ASDisplayNode+Subclasses.h>
+#import <AsyncDisplayKit/ASDisplayNodeInternal.h>
 #import <AsyncDisplayKit/ASVideoNode.h>
 #import <AsyncDisplayKit/ASEqualityHelpers.h>
 #import <AsyncDisplayKit/ASInternalHelpers.h>
@@ -322,7 +323,7 @@ static NSString * const kRate = @"rate";
 
 - (void)observeValueForKeyPath:(NSString *)keyPath ofObject:(id)object change:(NSDictionary *)change context:(void *)context
 {
-  ASLockScopeSelf();
+  ASDN::UniqueLock l(__instanceLock__);
 
   if (object == _currentPlayerItem) {
     if ([keyPath isEqualToString:kStatus]) {
@@ -330,8 +331,9 @@ static NSString * const kRate = @"rate";
       if (self.playerState != ASVideoNodePlayerStatePlaying) {
         self.playerState = ASVideoNodePlayerStateReadyToPlay;
         if (_shouldBePlaying && ASInterfaceStateIncludesVisible(self.interfaceState)) {
-          ASUnlockScope(self);
+          l.unlock();
           [self play];
+          l.lock();
         }
       }
       // If we don't yet have a placeholder image update it now that we should have data available for it
@@ -354,8 +356,10 @@ static NSString * const kRate = @"rate";
           [self.delegate videoNodeDidRecoverFromStall:self];
         }
 
-        ASUnlockScope(self);
+        l.unlock();
         [self play]; // autoresume after buffer catches up
+        // NOTE: Early return without re-locking.
         return;
       }
     } else if ([keyPath isEqualToString:kplaybackBufferEmpty]) {
       if (_shouldBePlaying && [change[NSKeyValueChangeNewKey] boolValue] == YES && ASInterfaceStateIncludesVisible(self.interfaceState)) {
@@ -373,6 +377,8 @@ static NSString * const kRate = @"rate";
       }
     }
   }
+
+  // NOTE: Early return above.
 }
 
 - (void)tapped
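In the stall-recovery branch the method now returns while the lock is still released, which is safe with std::unique_lock because its destructor only unlocks a mutex it currently owns. A small sketch of that idea, with a hypothetical play() standing in for the player call that must not run under the lock:

#include <mutex>

std::mutex instanceLock;

void play() { /* must not be called while holding instanceLock */ }

void observe(bool stalled) {
  std::unique_lock<std::mutex> l(instanceLock);
  if (stalled) {
    l.unlock();   // drop the lock before re-entering player code
    play();
    return;       // OK: the destructor only unlocks when the lock is owned
  }
  // ... other branches re-lock with l.lock() before touching guarded state ...
}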
@@ -39,11 +39,11 @@ NSString * const kASBasicImageDownloaderContextCompletionBlock = @"kASBasicImage
 
 static NSMutableDictionary *currentRequests = nil;
 // Allocate currentRequestsLock on the heap to prevent destruction at app exit (https://github.com/TextureGroup/Texture/issues/136)
-static ASDN::StaticMutex& currentRequestsLock = *new ASDN::StaticMutex;
+static auto *currentRequestsLock = new ASDN::Mutex;
 
 + (ASBasicImageDownloaderContext *)contextForURL:(NSURL *)URL
 {
-  ASDN::StaticMutexLocker l(currentRequestsLock);
+  ASDN::MutexLocker l(*currentRequestsLock);
   if (!currentRequests) {
     currentRequests = [[NSMutableDictionary alloc] init];
   }
@@ -57,7 +57,7 @@ static ASDN::StaticMutex& currentRequestsLock = *new ASDN::StaticMutex;
 
 + (void)cancelContextWithURL:(NSURL *)URL
 {
-  ASDN::StaticMutexLocker l(currentRequestsLock);
+  ASDN::MutexLocker l(*currentRequestsLock);
   if (currentRequests) {
     [currentRequests removeObjectForKey:URL];
   }
@@ -40,19 +40,21 @@
 
 - (void)performBlockOnMainThread:(dispatch_block_t)block
 {
-  ASDN::MutexLocker l(_serialQueueLock);
+  ASDN::UniqueLock l(_serialQueueLock);
   [_blocks addObject:block];
   {
-    ASDN::MutexUnlocker u(_serialQueueLock);
+    l.unlock();
     [self runBlocks];
+    l.lock();
   }
 }
 
 - (void)runBlocks
 {
   dispatch_block_t mainThread = ^{
+    ASDN::UniqueLock l(self->_serialQueueLock);
     do {
-      ASDN::MutexLocker l(self->_serialQueueLock);
       dispatch_block_t block;
       if (self->_blocks.count > 0) {
         block = _blocks[0];
@@ -61,8 +63,9 @@
         break;
       }
       {
-        ASDN::MutexUnlocker u(self->_serialQueueLock);
+        l.unlock();
         block();
+        l.lock();
       }
     } while (true);
   };
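The drain loop in runBlocks now hoists a single UniqueLock outside the loop and toggles it around each block invocation, rather than constructing a fresh MutexLocker/MutexUnlocker pair on every iteration. A standalone C++ sketch of that loop, using a std::deque of std::function in place of the NSMutableArray of blocks:

#include <deque>
#include <functional>
#include <mutex>

std::mutex queueLock;
std::deque<std::function<void()>> blocks;

void runBlocks() {
  std::unique_lock<std::mutex> l(queueLock);  // acquired once, reused across iterations
  while (true) {
    if (blocks.empty()) {
      break;                                  // destructor releases the lock on exit
    }
    std::function<void()> block = std::move(blocks.front());
    blocks.pop_front();

    l.unlock();   // never run user code while holding the queue lock
    block();
    l.lock();     // re-acquire before inspecting the queue again
  }
}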
@@ -95,7 +95,6 @@ ASDISPLAYNODE_INLINE void _ASUnlockScopeCleanup(id<NSLocking> __strong *lockPtr)
 
 #ifdef __cplusplus
 
-#define TIME_LOCKER 0
 /**
  * Enable this flag to collect information on the owning thread and ownership level of a mutex.
  * These properties are useful to determine if a mutex has been acquired and in case of a recursive mutex, how many times that happened.
@@ -112,11 +111,8 @@ ASDISPLAYNODE_INLINE void _ASUnlockScopeCleanup(id<NSLocking> __strong *lockPtr)
 #define CHECK_LOCKING_SAFETY 0
 #endif
 
-#if TIME_LOCKER
-#import <QuartzCore/QuartzCore.h>
-#endif
-
 #include <memory>
+#include <mutex>
 
 // This MUST always execute, even when assertions are disabled. Otherwise all lock operations become no-ops!
 // (To be explicit, do not turn this into an NSAssert, assert(), or any other kind of statement where the
@@ -143,106 +139,6 @@ ASDISPLAYNODE_INLINE void _ASUnlockScopeCleanup(id<NSLocking> __strong *lockPtr)
 
 namespace ASDN {
 
-  template<class T>
-  class Locker
-  {
-    T &_l;
-
-#if TIME_LOCKER
-    CFTimeInterval _ti;
-    const char *_name;
-#endif
-
-  public:
-#if !TIME_LOCKER
-
-    Locker (T &l) noexcept : _l (l) {
-      _l.lock ();
-    }
-
-    ~Locker () {
-      _l.unlock ();
-    }
-
-    // non-copyable.
-    Locker(const Locker<T>&) = delete;
-    Locker &operator=(const Locker<T>&) = delete;
-
-#else
-
-    Locker (T &l, const char *name = NULL) noexcept : _l (l), _name(name) {
-      _ti = CACurrentMediaTime();
-      _l.lock ();
-    }
-
-    ~Locker () {
-      _l.unlock ();
-      if (_name) {
-        printf(_name, NULL);
-        printf(" dt:%f\n", CACurrentMediaTime() - _ti);
-      }
-    }
-
-#endif
-
-  };
-
-  template<class T>
-  class SharedLocker
-  {
-    std::shared_ptr<T> _l;
-
-#if TIME_LOCKER
-    CFTimeInterval _ti;
-    const char *_name;
-#endif
-
-  public:
-#if !TIME_LOCKER
-
-    SharedLocker (std::shared_ptr<T> const& l) noexcept : _l (l) {
-      ASDisplayNodeCAssertTrue(_l != nullptr);
-      _l->lock ();
-    }
-
-    ~SharedLocker () {
-      _l->unlock ();
-    }
-
-    // non-copyable.
-    SharedLocker(const SharedLocker<T>&) = delete;
-    SharedLocker &operator=(const SharedLocker<T>&) = delete;
-
-#else
-
-    SharedLocker (std::shared_ptr<T> const& l, const char *name = NULL) noexcept : _l (l), _name(name) {
-      _ti = CACurrentMediaTime();
-      _l->lock ();
-    }
-
-    ~SharedLocker () {
-      _l->unlock ();
-      if (_name) {
-        printf(_name, NULL);
-        printf(" dt:%f\n", CACurrentMediaTime() - _ti);
-      }
-    }
-
-#endif
-
-  };
-
-  template<class T>
-  class Unlocker
-  {
-    T &_l;
-  public:
-    Unlocker (T &l) noexcept : _l (l) { _l.unlock (); }
-    ~Unlocker () {_l.lock ();}
-    Unlocker(Unlocker<T>&) = delete;
-    Unlocker &operator=(Unlocker<T>&) = delete;
-  };
-
   // Set once in Mutex constructor. Linker fails if this is a member variable. ??
   static BOOL gMutex_unfair;
@@ -414,41 +310,8 @@ namespace ASDN {
     RecursiveMutex () : Mutex (true) {}
   };
 
-  typedef Locker<Mutex> MutexLocker;
-  typedef SharedLocker<Mutex> MutexSharedLocker;
-  typedef Unlocker<Mutex> MutexUnlocker;
-
-  /**
-   If you are creating a static mutex, use StaticMutex. This avoids expensive constructor overhead at startup (or worse, ordering
-   issues between different static objects). It also avoids running a destructor on app exit time (needless expense).
-
-   Note that you can, but should not, use StaticMutex for non-static objects. It will leak its mutex on destruction,
-   so avoid that!
-   */
-  struct StaticMutex
-  {
-    StaticMutex () : _m (PTHREAD_MUTEX_INITIALIZER) {}
-
-    // non-copyable.
-    StaticMutex(const StaticMutex&) = delete;
-    StaticMutex &operator=(const StaticMutex&) = delete;
-
-    void lock () {
-      AS_POSIX_ASSERT_NOERR(pthread_mutex_lock (this->mutex()));
-    }
-
-    void unlock () {
-      AS_POSIX_ASSERT_NOERR(pthread_mutex_unlock (this->mutex()));
-    }
-
-    pthread_mutex_t *mutex () { return &_m; }
-
-  private:
-    pthread_mutex_t _m;
-  };
-
-  typedef Locker<StaticMutex> StaticMutexLocker;
-  typedef Unlocker<StaticMutex> StaticMutexUnlocker;
+  typedef std::lock_guard<Mutex> MutexLocker;
+  typedef std::unique_lock<Mutex> UniqueLock;
 
 } // namespace ASDN
 
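After this hunk the locker vocabulary maps directly onto the standard library: MutexLocker is std::lock_guard<Mutex> and UniqueLock is std::unique_lock<Mutex>, which only requires ASDN::Mutex to expose lock()/unlock() (plus try_lock() if try-locking is used). A short sketch of how a custom mutex can back those typedefs, with PosixMutex as a hypothetical stand-in for ASDN::Mutex:

#include <mutex>
#include <pthread.h>

// Hypothetical stand-in for ASDN::Mutex: any type exposing lock()/unlock()
// (BasicLockable) can be used with std::lock_guard and std::unique_lock.
class PosixMutex {
public:
  PosixMutex() { pthread_mutex_init(&_m, nullptr); }
  ~PosixMutex() { pthread_mutex_destroy(&_m); }
  PosixMutex(const PosixMutex &) = delete;
  PosixMutex &operator=(const PosixMutex &) = delete;

  void lock() { pthread_mutex_lock(&_m); }
  void unlock() { pthread_mutex_unlock(&_m); }
  bool try_lock() { return pthread_mutex_trylock(&_m) == 0; }

private:
  pthread_mutex_t _m;
};

typedef std::lock_guard<PosixMutex> MutexLocker;   // scoped guard, locked for its whole lifetime
typedef std::unique_lock<PosixMutex> UniqueLock;   // supports unlock()/lock() mid-scope

PosixMutex gLock;

void example() {
  MutexLocker l(gLock);  // locked for the whole scope, unlocked in the destructor
}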
@@ -81,7 +81,7 @@ static inline BOOL ASLayoutCanTransitionAsynchronous(ASLayout *layout) {
 
 - (BOOL)isSynchronous
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   return !ASLayoutCanTransitionAsynchronous(_pendingLayout.layout);
 }
 
@@ -93,7 +93,7 @@ static inline BOOL ASLayoutCanTransitionAsynchronous(ASLayout *layout) {
 
 - (void)applySubnodeInsertionsAndMoves
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   [self calculateSubnodeOperationsIfNeeded];
 
   // Create an activity even if no subnodes affected.
@@ -131,7 +131,7 @@ static inline BOOL ASLayoutCanTransitionAsynchronous(ASLayout *layout) {
 - (void)applySubnodeRemovals
 {
   as_activity_scope(as_activity_create("Apply subnode removals", AS_ACTIVITY_CURRENT, OS_ACTIVITY_FLAG_DEFAULT));
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   [self calculateSubnodeOperationsIfNeeded];
 
   if (_removedSubnodes.count == 0) {
@@ -151,7 +151,7 @@ static inline BOOL ASLayoutCanTransitionAsynchronous(ASLayout *layout) {
 
 - (void)calculateSubnodeOperationsIfNeeded
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   if (_calculatedSubnodeOperations) {
     return;
   }
@@ -206,27 +206,27 @@ static inline BOOL ASLayoutCanTransitionAsynchronous(ASLayout *layout) {
 
 - (NSArray<ASDisplayNode *> *)currentSubnodesWithTransitionContext:(_ASTransitionContext *)context
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   return _node.subnodes;
 }
 
 - (NSArray<ASDisplayNode *> *)insertedSubnodesWithTransitionContext:(_ASTransitionContext *)context
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   [self calculateSubnodeOperationsIfNeeded];
   return _insertedSubnodes;
 }
 
 - (NSArray<ASDisplayNode *> *)removedSubnodesWithTransitionContext:(_ASTransitionContext *)context
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   [self calculateSubnodeOperationsIfNeeded];
   return _removedSubnodes;
 }
 
 - (ASLayout *)transitionContext:(_ASTransitionContext *)context layoutForKey:(NSString *)key
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   if ([key isEqualToString:ASTransitionContextFromLayoutKey]) {
     return _previousLayout.layout;
   } else if ([key isEqualToString:ASTransitionContextToLayoutKey]) {
@@ -238,7 +238,7 @@ static inline BOOL ASLayoutCanTransitionAsynchronous(ASLayout *layout) {
 
 - (ASSizeRange)transitionContext:(_ASTransitionContext *)context constrainedSizeForKey:(NSString *)key
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   if ([key isEqualToString:ASTransitionContextFromLayoutKey]) {
     return _previousLayout.constrainedSize;
   } else if ([key isEqualToString:ASTransitionContextToLayoutKey]) {
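With SharedLocker removed, objects whose __instanceLock__ is a std::shared_ptr<ASDN::Mutex> (as in the TextKit hunk below) now lock by dereferencing the pointer into an ordinary MutexLocker. A brief sketch of that shape, assuming the shared_ptr is never null:

#include <memory>
#include <mutex>

struct Context {
  // Shared so another object can keep the mutex alive and lock the same instance.
  std::shared_ptr<std::mutex> instanceLock = std::make_shared<std::mutex>();

  void doWork() {
    // The old MutexSharedLocker took the shared_ptr itself; now the pointer is
    // simply dereferenced into a plain scoped guard.
    std::lock_guard<std::mutex> l(*instanceLock);
    // ... guarded work ...
  }
};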
@@ -32,9 +32,9 @@
 {
   if (self = [super init]) {
     // Concurrently initialising TextKit components crashes (rdar://18448377) so we use a global lock.
-    // Allocate __staticMutex on the heap to prevent destruction at app exit (https://github.com/TextureGroup/Texture/issues/136)
-    static ASDN::StaticMutex& __staticMutex = *new ASDN::StaticMutex;
-    ASDN::StaticMutexLocker l(__staticMutex);
+    // Allocate mutex on the heap to prevent destruction at app exit (https://github.com/TextureGroup/Texture/issues/136)
+    static auto *mutex = new ASDN::Mutex;
+    ASDN::MutexLocker l(*mutex);
 
     __instanceLock__ = std::make_shared<ASDN::Mutex>();
 
@@ -66,7 +66,7 @@
                                    NSTextStorage *,
                                    NSTextContainer *))block
 {
-  ASDN::MutexSharedLocker l(__instanceLock__);
+  ASDN::MutexLocker l(*__instanceLock__);
   if (block) {
     block(_layoutManager, _textStorage, _textContainer);
   }