Further improvements to cache locking.
In particular, don't hold the cache lock while doing computations.
commit 6ced4f72d0
parent 0f8dac81cb
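The pattern applied throughout this commit: probe the cache under a shared (read) lock, release it, run the expensive computation with no lock held, then take an exclusive (write) lock just long enough to store the result. Below is a minimal self-contained sketch of that pattern; the Cache type, the int key, and compute() are illustrative stand-ins, not KiCad code.

    #include <mutex>
    #include <shared_mutex>
    #include <unordered_map>

    struct Cache
    {
        mutable std::shared_mutex       mutex;
        std::unordered_map<int, double> entries;
    };

    // Stand-in for the expensive work (e.g. collision testing, polygon math).
    static double compute( int key )
    {
        return key * 2.0;
    }

    double lookupOrCompute( Cache& cache, int key )
    {
        {
            // Cheap probe under a shared lock; readers don't block each other.
            std::shared_lock<std::shared_mutex> readLock( cache.mutex );

            auto it = cache.entries.find( key );

            if( it != cache.entries.end() )
                return it->second;
        }

        // Compute with no lock held, so other threads can keep using the cache.
        double value = compute( key );

        {
            // Exclusive lock only for the duration of the insertion itself.
            std::unique_lock<std::shared_mutex> writeLock( cache.mutex );
            cache.entries[ key ] = value;
        }

        return value;
    }

Note the deliberate race: two threads can both miss, both compute, and both store. Because compute() is deterministic the stores are identical, so this is safe, and it is the tradeoff the hunks below make in exchange for never computing under the mutex.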
@@ -263,7 +263,7 @@ void BOARD::IncrementTimeStamp()
             || !m_ZoneBBoxCache.empty()
             || m_CopperItemRTreeCache )
     {
-        std::unique_lock<std::shared_mutex> cacheLock( m_CachesMutex );
+        std::unique_lock<std::shared_mutex> writeLock( m_CachesMutex );
 
         m_IntersectsAreaCache.clear();
         m_EnclosedByAreaCache.clear();
@@ -1227,7 +1227,7 @@ public:
     };
 
     // ------------ Run-time caches -------------
-    std::shared_mutex                           m_CachesMutex;
+    mutable std::shared_mutex                   m_CachesMutex;
     std::unordered_map<PTR_PTR_CACHE_KEY, bool> m_IntersectsCourtyardCache;
     std::unordered_map<PTR_PTR_CACHE_KEY, bool> m_IntersectsFCourtyardCache;
    std::unordered_map<PTR_PTR_CACHE_KEY, bool> m_IntersectsBCourtyardCache;
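The mutable qualifier matters because callers frequently hold only a const BOARD* (ZONE::GetBoundingBox() below is one), and locking a std::shared_mutex is a non-const operation; marking the member mutable keeps it lockable through const access and removes the const_casts the old ZONE code needed. A minimal illustration, with hypothetical names:

    #include <shared_mutex>

    class Widget
    {
    public:
        int Value() const
        {
            // Compiles only because m_mutex is mutable: lock_shared() is a
            // non-const member of std::shared_mutex, and `this` is const here.
            std::shared_lock<std::shared_mutex> lock( m_mutex );
            return m_value;
        }

    private:
        mutable std::shared_mutex m_mutex;
        int                       m_value = 0;
    };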
@@ -137,7 +137,7 @@ bool DRC_CACHE_GENERATOR::Run()
     std::future<void> retn = tp.submit(
             [&]()
             {
-                std::unique_lock<std::shared_mutex> cacheLock( m_board->m_CachesMutex );
+                std::unique_lock<std::shared_mutex> writeLock( m_board->m_CachesMutex );
 
                 if( !m_board->m_CopperItemRTreeCache )
                     m_board->m_CopperItemRTreeCache = std::make_shared<DRC_RTREE>();
@@ -185,8 +185,10 @@ bool DRC_CACHE_GENERATOR::Run()
                     rtree->Insert( aZone, layer );
                 }
 
-                std::unique_lock<std::shared_mutex> cacheLock( m_board->m_CachesMutex );
-                m_board->m_CopperZoneRTreeCache[ aZone ] = std::move( rtree );
+                {
+                    std::unique_lock<std::shared_mutex> writeLock( m_board->m_CachesMutex );
+                    m_board->m_CopperZoneRTreeCache[ aZone ] = std::move( rtree );
+                }
 
                 done.fetch_add( 1 );
             }
@@ -154,7 +154,7 @@ bool DRC_TEST_PROVIDER_DISALLOW::Run()
                 PTR_PTR_LAYER_CACHE_KEY key = { ruleArea, copperZone, UNDEFINED_LAYER };
 
                 {
-                    std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
+                    std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
                     board->m_IntersectsAreaCache[ key ] = isInside;
                 }
@@ -58,28 +58,32 @@ public:
         // in the ENUM_MAP: one for the canonical layer name and one for the user layer name.
         // We need to check against both.
 
-        wxPGChoices& layerMap = ENUM_MAP<PCB_LAYER_ID>::Instance().Choices();
-        const wxString& layerName = b->AsString();
-        BOARD* board = static_cast<PCBEXPR_CONTEXT*>( aCtx )->GetBoard();
-        std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
-        auto i = board->m_LayerExpressionCache.find( layerName );
-        LSET mask;
-
-        if( i == board->m_LayerExpressionCache.end() )
-        {
-            for( unsigned ii = 0; ii < layerMap.GetCount(); ++ii )
-            {
-                wxPGChoiceEntry& entry = layerMap[ii];
-
-                if( entry.GetText().Matches( layerName ) )
-                    mask.set( ToLAYER_ID( entry.GetValue() ) );
-            }
-
-            board->m_LayerExpressionCache[ layerName ] = mask;
-        }
-        else
-        {
-            mask = i->second;
-        }
+        wxPGChoices& layerMap = ENUM_MAP<PCB_LAYER_ID>::Instance().Choices();
+        const wxString& layerName = b->AsString();
+        BOARD* board = static_cast<PCBEXPR_CONTEXT*>( aCtx )->GetBoard();
+
+        {
+            std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
+
+            auto i = board->m_LayerExpressionCache.find( layerName );
+
+            if( i != board->m_LayerExpressionCache.end() )
+                return i->second.Contains( m_layer );
+        }
+
+        LSET mask;
+
+        for( unsigned ii = 0; ii < layerMap.GetCount(); ++ii )
+        {
+            wxPGChoiceEntry& entry = layerMap[ii];
+
+            if( entry.GetText().Matches( layerName ) )
+                mask.set( ToLAYER_ID( entry.GetValue() ) );
+        }
+
+        {
+            std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
+            board->m_LayerExpressionCache[ layerName ] = mask;
+        }
 
         return mask.Contains( m_layer );
@@ -128,6 +128,8 @@ static void existsOnLayerFunc( LIBEVAL::CONTEXT* aCtx, void *self )
                 aCtx->ReportError( wxString::Format( _( "Unrecognized layer '%s'" ),
                                                      layerName ) );
             }
+
+            return 0.0;
         }
         else
         {
@@ -136,32 +138,33 @@ static void existsOnLayerFunc( LIBEVAL::CONTEXT* aCtx, void *self )
             */
 
             BOARD* board = item->GetBoard();
-            std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
-            auto i = board->m_LayerExpressionCache.find( layerName );
-
-            LSET mask;
-
-            if( i == board->m_LayerExpressionCache.end() )
-            {
-                for( unsigned ii = 0; ii < layerMap.GetCount(); ++ii )
-                {
-                    wxPGChoiceEntry& entry = layerMap[ ii ];
-
-                    if( entry.GetText().Matches( layerName ) )
-                        mask.set( ToLAYER_ID( entry.GetValue() ) );
-                }
-
-                board->m_LayerExpressionCache[ layerName ] = mask;
-            }
-            else
-            {
-                mask = i->second;
-            }
-
-            if( ( item->GetLayerSet() & mask ).any() )
-                return 1.0;
+
+            {
+                std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
+
+                auto i = board->m_LayerExpressionCache.find( layerName );
+
+                if( i != board->m_LayerExpressionCache.end() )
+                    return ( item->GetLayerSet() & i->second ).any() ? 1.0 : 0.0;
+            }
+
+            LSET mask;
+
+            for( unsigned ii = 0; ii < layerMap.GetCount(); ++ii )
+            {
+                wxPGChoiceEntry& entry = layerMap[ ii ];
+
+                if( entry.GetText().Matches( layerName ) )
+                    mask.set( ToLAYER_ID( entry.GetValue() ) );
+            }
+
+            {
+                std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
+                board->m_LayerExpressionCache[ layerName ] = mask;
+            }
+            return ( item->GetLayerSet() & mask ).any() ? 1.0 : 0.0;
         }
 
         return 0.0;
     } );
 }
@@ -272,11 +275,12 @@ static void intersectsCourtyardFunc( LIBEVAL::CONTEXT* aCtx, void* self )
     if( searchFootprints( board, arg->AsString(), context,
             [&]( FOOTPRINT* fp )
             {
-                PTR_PTR_CACHE_KEY key = { fp, item };
-                std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
+                PTR_PTR_CACHE_KEY key = { fp, item };
+
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
 
                     auto i = board->m_IntersectsCourtyardCache.find( key );
 
                     if( i != board->m_IntersectsCourtyardCache.end() )
@@ -287,7 +291,10 @@ static void intersectsCourtyardFunc( LIBEVAL::CONTEXT* aCtx, void* self )
                             || collidesWithCourtyard( item, itemShape, context, fp, B_Cu );
 
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
                     board->m_IntersectsCourtyardCache[ key ] = res;
+                }
 
                 return res;
             } ) )
@@ -332,11 +339,12 @@ static void intersectsFrontCourtyardFunc( LIBEVAL::CONTEXT* aCtx, void* self )
     if( searchFootprints( board, arg->AsString(), context,
             [&]( FOOTPRINT* fp )
             {
-                PTR_PTR_CACHE_KEY key = { fp, item };
-                std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
+                PTR_PTR_CACHE_KEY key = { fp, item };
+
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
 
                     auto i = board->m_IntersectsFCourtyardCache.find( key );
 
                     if( i != board->m_IntersectsFCourtyardCache.end() )
@@ -346,7 +354,10 @@ static void intersectsFrontCourtyardFunc( LIBEVAL::CONTEXT* aCtx, void* self )
                 bool res = collidesWithCourtyard( item, itemShape, context, fp, F_Cu );
 
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
                     board->m_IntersectsFCourtyardCache[ key ] = res;
+                }
 
                 return res;
             } ) )
@@ -391,11 +402,12 @@ static void intersectsBackCourtyardFunc( LIBEVAL::CONTEXT* aCtx, void* self )
     if( searchFootprints( board, arg->AsString(), context,
             [&]( FOOTPRINT* fp )
             {
-                PTR_PTR_CACHE_KEY key = { fp, item };
-                std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
+                PTR_PTR_CACHE_KEY key = { fp, item };
+
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
 
                     auto i = board->m_IntersectsBCourtyardCache.find( key );
 
                     if( i != board->m_IntersectsBCourtyardCache.end() )
@@ -405,7 +417,10 @@ static void intersectsBackCourtyardFunc( LIBEVAL::CONTEXT* aCtx, void* self )
                 bool res = collidesWithCourtyard( item, itemShape, context, fp, B_Cu );
 
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
                     board->m_IntersectsBCourtyardCache[ key ] = res;
+                }
 
                 return res;
             } ) )
@@ -649,8 +664,7 @@ static void intersectsAreaFunc( LIBEVAL::CONTEXT* aCtx, void* self )
                 if( !aArea->GetBoundingBox().Intersects( itemBBox ) )
                     return false;
 
-                LSET testLayers;
-                PTR_PTR_LAYER_CACHE_KEY key;
+                LSET testLayers;
 
                 if( aLayer != UNDEFINED_LAYER )
                     testLayers.set( aLayer );
@@ -659,10 +673,11 @@ static void intersectsAreaFunc( LIBEVAL::CONTEXT* aCtx, void* self )
 
                 for( PCB_LAYER_ID layer : testLayers.UIOrder() )
                 {
+                    PTR_PTR_LAYER_CACHE_KEY key = { aArea, item, layer };
+
                     if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
                     {
-                        std::shared_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
-                        key = { aArea, item, layer };
+                        std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
 
                         auto i = board->m_IntersectsAreaCache.find( key );
@@ -674,7 +689,7 @@ static void intersectsAreaFunc( LIBEVAL::CONTEXT* aCtx, void* self )
 
                     if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
                     {
-                        std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
+                        std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
                         board->m_IntersectsAreaCache[ key ] = collides;
                     }
 
@@ -736,11 +751,12 @@ static void enclosedByAreaFunc( LIBEVAL::CONTEXT* aCtx, void* self )
                 if( !aArea->GetBoundingBox().Intersects( itemBBox ) )
                     return false;
 
-                std::unique_lock<std::shared_mutex> cacheLock( board->m_CachesMutex );
-                PTR_PTR_LAYER_CACHE_KEY key = { aArea, item, layer };
+                PTR_PTR_LAYER_CACHE_KEY key = { aArea, item, layer };
+
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
 
                     auto i = board->m_EnclosedByAreaCache.find( key );
 
                     if( i != board->m_EnclosedByAreaCache.end() )
@@ -767,7 +783,10 @@ static void enclosedByAreaFunc( LIBEVAL::CONTEXT* aCtx, void* self )
                 }
 
+                if( ( item->GetFlags() & ROUTER_TRANSIENT ) == 0 )
+                {
+                    std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
                     board->m_EnclosedByAreaCache[ key ] = enclosedByArea;
+                }
 
                 return enclosedByArea;
             } ) )
@@ -345,25 +345,22 @@ const BOX2I ZONE::GetBoundingBox() const
     if( const BOARD* board = GetBoard() )
     {
         std::unordered_map<const ZONE*, BOX2I>& cache = board->m_ZoneBBoxCache;
-        std::shared_lock<std::shared_mutex> readLock( const_cast<BOARD*>( board )->m_CachesMutex );
-        auto cacheIter = cache.find( this );
-
-        if( cacheIter != cache.end() )
-            return cacheIter->second;
-
-        readLock.unlock();
-
-        // if we get here we need an exclusive lock to cache the entry
-        std::unique_lock<std::shared_mutex> writeLock( const_cast<BOARD*>( board )->m_CachesMutex );
-
-        // check again for the cached item; it might have been computed meanwhile by another thread
-        cacheIter = cache.find( this );
-        if( cacheIter != cache.end() )
-            return cacheIter->second;
+
+        {
+            std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
+
+            auto cacheIter = cache.find( this );
+
+            if( cacheIter != cache.end() )
+                return cacheIter->second;
+        }
 
         BOX2I bbox = m_Poly->BBox();
 
-        cache[ this ] = bbox;
+        {
+            std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
+            cache[ this ] = bbox;
+        }
 
         return bbox;
     }
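Worth noting in the hunk above: the old GetBoundingBox() used a double-checked scheme, re-probing the cache after upgrading to the write lock because another thread could have filled the entry in the unlocked window. The new code drops the re-check and stores unconditionally; since m_Poly->BBox() is deterministic, a racing thread just overwrites the entry with an identical box, trading an occasional duplicate computation for never holding the mutex across BBox(). For contrast, a sketch of the double-checked variant the old code used, reusing the illustrative Cache and compute() stand-ins from the sketch near the top of this page:

    double lookupDoubleChecked( Cache& cache, int key )
    {
        {
            std::shared_lock<std::shared_mutex> readLock( cache.mutex );
            auto it = cache.entries.find( key );

            if( it != cache.entries.end() )
                return it->second;
        }

        std::unique_lock<std::shared_mutex> writeLock( cache.mutex );

        // Re-check: another thread may have inserted the entry between the
        // read lock being released and the write lock being acquired.
        auto it = cache.entries.find( key );

        if( it != cache.entries.end() )
            return it->second;

        // Here compute() runs under the exclusive lock, which is exactly what
        // this commit is trying to avoid; that is why the re-check was dropped
        // in favor of an occasional redundant (but identical) computation.
        double value = compute( key );
        cache.entries[ key ] = value;
        return value;
    }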
@@ -374,18 +371,9 @@ const BOX2I ZONE::GetBoundingBox() const
 
 void ZONE::CacheBoundingBox()
 {
-    BOARD* board = GetBoard();
-    std::unordered_map<const ZONE*, BOX2I>& cache = board->m_ZoneBBoxCache;
-    std::shared_lock<std::shared_mutex> readLock( board->m_CachesMutex );
-    auto cacheIter = cache.find( this );
-
-    if( cacheIter == cache.end() ) {
-        readLock.unlock();
-        std::unique_lock<std::shared_mutex> writeLock( board->m_CachesMutex );
-        // check again in case another thread has already calculated the entry while we were waiting for the exclusive lock
-        if( cache.count( this ) == 0 )
-            cache[ this ] = m_Poly->BBox();
-    }
+    // GetBoundingBox() will cache it for us, and there's no sense duplicating the somewhat tricky
+    // locking code.
+    GetBoundingBox();
 }
 
 