ADDED: Threadpool
Thread pools are long-lasting executors that have close to zero overhead when launching new jobs. This is advantageous over creating new threads as we can use this for threading smalling jobs and smaller quanta. It also avoids the heuristics needed to determine the optimal number of threads to spawn
This commit is contained in:
parent
8418fe12d8
commit
03c279ffd4
|
@ -65,6 +65,16 @@ install( TARGETS lib_kicad
|
|||
)
|
||||
endif()
|
||||
|
||||
# Build a single library for the thread pool that we can link around
|
||||
|
||||
add_library( threadpool STATIC
|
||||
thread_pool.cpp
|
||||
)
|
||||
|
||||
target_include_directories( threadpool PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
|
||||
# The build version string defaults to the value in the KiCadVersion.cmake file.
|
||||
# If being built inside a git repository, the git tag and commit hash are used to create
|
||||
|
@ -433,7 +443,7 @@ set( COMMON_SRCS
|
|||
add_library( common STATIC
|
||||
${COMMON_SRCS}
|
||||
)
|
||||
|
||||
|
||||
add_dependencies( common version_header )
|
||||
add_dependencies( common compoundfilereader ) # used by altium_parser.cpp
|
||||
|
||||
|
@ -443,6 +453,7 @@ target_link_libraries( common
|
|||
kiplatform
|
||||
gal
|
||||
scripting
|
||||
threadpool
|
||||
pybind11::embed
|
||||
compoundfilereader
|
||||
${Boost_LIBRARIES}
|
||||
|
@ -464,6 +475,7 @@ target_include_directories( common
|
|||
PUBLIC
|
||||
.
|
||||
${CMAKE_BINARY_DIR}
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
# text markup support
|
||||
|
@ -559,11 +571,16 @@ set_source_files_properties( ${PCB_COMMON_SRCS} PROPERTIES
|
|||
|
||||
add_library( pcbcommon STATIC ${PCB_COMMON_SRCS} )
|
||||
|
||||
target_include_directories( pcbcommon PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
target_link_libraries( pcbcommon PUBLIC
|
||||
common
|
||||
delaunator
|
||||
kimath
|
||||
kiplatform
|
||||
threadpool
|
||||
)
|
||||
|
||||
if( KICAD_USE_3DCONNEXION )
|
||||
|
|
|
@ -152,63 +152,3 @@ FOOTPRINT_LIST* FOOTPRINT_LIST::GetInstance( KIWAY& aKiway )
|
|||
|
||||
return footprintInfo;
|
||||
}
|
||||
|
||||
|
||||
FOOTPRINT_ASYNC_LOADER::FOOTPRINT_ASYNC_LOADER() : m_list( nullptr )
|
||||
{
|
||||
m_total_libs = 0;
|
||||
}
|
||||
|
||||
|
||||
FOOTPRINT_ASYNC_LOADER::~FOOTPRINT_ASYNC_LOADER()
|
||||
{
|
||||
// This is NOP if the load has finished
|
||||
Abort();
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_ASYNC_LOADER::SetList( FOOTPRINT_LIST* aList )
|
||||
{
|
||||
m_list = aList;
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_ASYNC_LOADER::Start( FP_LIB_TABLE* aTable, wxString const* aNickname,
|
||||
unsigned aNThreads )
|
||||
{
|
||||
// Disable KIID generation: not needed for library parts; sometimes very slow
|
||||
KIID::CreateNilUuids( true );
|
||||
|
||||
// Capture the FP_LIB_TABLE into m_last_table. Formatting it as a string instead of storing the
|
||||
// raw data avoids having to pull in the FP-specific parts.
|
||||
STRING_FORMATTER sof;
|
||||
aTable->Format( &sof, 0 );
|
||||
m_last_table = sof.GetString();
|
||||
|
||||
m_list->startWorkers( aTable, aNickname, this, aNThreads );
|
||||
}
|
||||
|
||||
|
||||
bool FOOTPRINT_ASYNC_LOADER::Join()
|
||||
{
|
||||
if( m_list )
|
||||
{
|
||||
bool rv = m_list->joinWorkers();
|
||||
m_list = nullptr;
|
||||
return rv;
|
||||
}
|
||||
else
|
||||
return true;
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_ASYNC_LOADER::Abort()
|
||||
{
|
||||
if( m_list )
|
||||
{
|
||||
m_list->stopWorkers();
|
||||
m_list = nullptr;
|
||||
}
|
||||
|
||||
KIID::CreateNilUuids( false );
|
||||
}
|
||||
|
|
|
@ -53,14 +53,15 @@
|
|||
#include <kiplatform/policy.h>
|
||||
#include <lockfile.h>
|
||||
#include <menus_helpers.h>
|
||||
#include <paths.h>
|
||||
#include <pgm_base.h>
|
||||
#include <policy_keys.h>
|
||||
#include <python_scripting.h>
|
||||
#include <settings/common_settings.h>
|
||||
#include <settings/settings_manager.h>
|
||||
#include <systemdirsappend.h>
|
||||
#include <thread_pool.h>
|
||||
#include <trace_helpers.h>
|
||||
#include <paths.h>
|
||||
#include <policy_keys.h>
|
||||
|
||||
#ifdef KICAD_USE_SENTRY
|
||||
#include <boost/uuid/uuid_io.hpp>
|
||||
|
@ -392,6 +393,8 @@ bool PGM_BASE::InitPgm( bool aHeadless, bool aSkipPyInit )
|
|||
}
|
||||
#endif
|
||||
|
||||
GetKiCadThreadPool();
|
||||
|
||||
// Init KiCad environment
|
||||
// the environment variable KICAD (if exists) gives the kicad path:
|
||||
// something like set KICAD=d:\kicad
|
||||
|
|
|
@ -0,0 +1,33 @@
|
|||
/*
|
||||
* This program source code file is part of KiCad, a free EDA CAD application.
|
||||
*
|
||||
* Copyright (C) 2022 KiCad Developers, see CHANGELOG.TXT for contributors.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 3
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you may find one here:
|
||||
* http://www.gnu.org/licenses/gpl-3.0.html
|
||||
* or you may search the http://www.gnu.org website for the version 3 license,
|
||||
* or you may write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
*/
|
||||
|
||||
|
||||
#include <thread_pool.h>
|
||||
|
||||
|
||||
thread_pool& GetKiCadThreadPool()
|
||||
{
|
||||
static thread_pool tp;
|
||||
return tp;
|
||||
}
|
||||
|
|
@ -83,6 +83,10 @@ target_link_libraries( cvpcb_kiface
|
|||
# Must follow github_plugin
|
||||
target_link_libraries( cvpcb_kiface ${Boost_LIBRARIES} )
|
||||
|
||||
target_include_directories( cvpcb_kiface PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
if( UNIX AND NOT APPLE )
|
||||
# -lrt must follow Boost
|
||||
target_link_libraries( cvpcb_kiface rt )
|
||||
|
|
|
@ -396,7 +396,7 @@ target_link_libraries( eeschema
|
|||
common
|
||||
${wxWidgets_LIBRARIES}
|
||||
)
|
||||
|
||||
|
||||
# the main Eeschema program, in DSO form.
|
||||
add_library( eeschema_kiface_objects OBJECT
|
||||
${EESCHEMA_SRCS}
|
||||
|
@ -406,12 +406,14 @@ add_library( eeschema_kiface_objects OBJECT
|
|||
target_include_directories( eeschema_kiface_objects
|
||||
PUBLIC
|
||||
.
|
||||
netlist_exporters )
|
||||
netlist_exporters
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
target_link_libraries( eeschema_kiface_objects
|
||||
PUBLIC
|
||||
common )
|
||||
|
||||
|
||||
# Since we're not using target_link_libraries, we need to explicitly
|
||||
# declare the dependency
|
||||
add_dependencies( eeschema_kiface_objects common )
|
||||
|
|
|
@ -21,7 +21,6 @@
|
|||
*/
|
||||
|
||||
#include <list>
|
||||
#include <thread>
|
||||
#include <future>
|
||||
#include <vector>
|
||||
#include <unordered_map>
|
||||
|
@ -44,6 +43,7 @@
|
|||
#include <connection_graph.h>
|
||||
#include <widgets/ui_common.h>
|
||||
#include <string_utils.h>
|
||||
#include <thread_pool.h>
|
||||
#include <wx/log.h>
|
||||
|
||||
#include <advanced_config.h> // for realtime connectivity switch in release builds
|
||||
|
@ -600,141 +600,125 @@ void CONNECTION_GRAPH::updateItemConnectivity( const SCH_SHEET_PATH& aSheet,
|
|||
// Pre-scan to see if we have a bus at this location
|
||||
SCH_LINE* busLine = aSheet.LastScreen()->GetBus( it.first );
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 4 items (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( connection_vec.size() + 3 ) / 4 );
|
||||
|
||||
std::atomic<size_t> nextItem( 0 );
|
||||
std::mutex update_mutex;
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
|
||||
auto update_lambda = [&]() -> size_t
|
||||
auto update_lambda = [&]( SCH_ITEM* connected_item ) -> size_t
|
||||
{
|
||||
for( size_t ii = nextItem++; ii < connection_vec.size(); ii = nextItem++ )
|
||||
// Bus entries are special: they can have connection points in the
|
||||
// middle of a wire segment, because the junction algo doesn't split
|
||||
// the segment in two where you place a bus entry. This means that
|
||||
// bus entries that don't land on the end of a line segment need to
|
||||
// have "virtual" connection points to the segments they graphically
|
||||
// touch.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
SCH_ITEM* connected_item = connection_vec[ii];
|
||||
// Bus entries are special: they can have connection points in the
|
||||
// middle of a wire segment, because the junction algo doesn't split
|
||||
// the segment in two where you place a bus entry. This means that
|
||||
// bus entries that don't land on the end of a line segment need to
|
||||
// have "virtual" connection points to the segments they graphically
|
||||
// touch.
|
||||
// If this location only has the connection point of the bus
|
||||
// entry itself, this means that either the bus entry is not
|
||||
// connected to anything graphically, or that it is connected to
|
||||
// a segment at some point other than at one of the endpoints.
|
||||
if( connection_vec.size() == 1 )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = busLine;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Bus-to-bus entries are treated just like bus wires
|
||||
else if( connected_item->Type() == SCH_BUS_BUS_ENTRY_T )
|
||||
{
|
||||
if( connection_vec.size() < 2 )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_BUS_ENTRY*>( connected_item );
|
||||
|
||||
if( it.first == bus_entry->GetPosition() )
|
||||
bus_entry->m_connected_bus_items[0] = busLine;
|
||||
else
|
||||
bus_entry->m_connected_bus_items[1] = busLine;
|
||||
|
||||
std::lock_guard<std::mutex> lock( update_mutex );
|
||||
bus_entry->AddConnectionTo( aSheet, busLine );
|
||||
busLine->AddConnectionTo( aSheet, bus_entry );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Change junctions to be on bus junction layer if they are touching a bus
|
||||
else if( connected_item->Type() == SCH_JUNCTION_T )
|
||||
{
|
||||
connected_item->SetLayer( busLine ? LAYER_BUS_JUNCTION : LAYER_JUNCTION );
|
||||
}
|
||||
|
||||
SCH_ITEM_SET& connected_set = connected_item->ConnectedItems( aSheet );
|
||||
connected_set.reserve( connection_vec.size() );
|
||||
|
||||
for( SCH_ITEM* test_item : connection_vec )
|
||||
{
|
||||
bool bus_connection_ok = true;
|
||||
|
||||
if( test_item == connected_item )
|
||||
continue;
|
||||
|
||||
// Set up the link between the bus entry net and the bus
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
// If this location only has the connection point of the bus
|
||||
// entry itself, this means that either the bus entry is not
|
||||
// connected to anything graphically, or that it is connected to
|
||||
// a segment at some point other than at one of the endpoints.
|
||||
if( connection_vec.size() == 1 )
|
||||
if( test_item->GetLayer() == LAYER_BUS )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = busLine;
|
||||
}
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = test_item;
|
||||
}
|
||||
}
|
||||
|
||||
// Bus-to-bus entries are treated just like bus wires
|
||||
else if( connected_item->Type() == SCH_BUS_BUS_ENTRY_T )
|
||||
{
|
||||
if( connection_vec.size() < 2 )
|
||||
{
|
||||
if( busLine )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_BUS_ENTRY*>( connected_item );
|
||||
|
||||
if( it.first == bus_entry->GetPosition() )
|
||||
bus_entry->m_connected_bus_items[0] = busLine;
|
||||
else
|
||||
bus_entry->m_connected_bus_items[1] = busLine;
|
||||
|
||||
std::lock_guard<std::mutex> lock( update_mutex );
|
||||
bus_entry->AddConnectionTo( aSheet, busLine );
|
||||
busLine->AddConnectionTo( aSheet, bus_entry );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Change junctions to be on bus junction layer if they are touching a bus
|
||||
else if( connected_item->Type() == SCH_JUNCTION_T )
|
||||
{
|
||||
connected_item->SetLayer( busLine ? LAYER_BUS_JUNCTION : LAYER_JUNCTION );
|
||||
}
|
||||
|
||||
SCH_ITEM_SET& connected_set = connected_item->ConnectedItems( aSheet );
|
||||
connected_set.reserve( connection_vec.size() );
|
||||
|
||||
for( SCH_ITEM* test_item : connection_vec )
|
||||
{
|
||||
bool bus_connection_ok = true;
|
||||
|
||||
if( test_item == connected_item )
|
||||
continue;
|
||||
|
||||
// Set up the link between the bus entry net and the bus
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
if( test_item->GetLayer() == LAYER_BUS )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_entry->m_connected_bus_item = test_item;
|
||||
}
|
||||
}
|
||||
|
||||
// Bus entries only connect to bus lines on the end that is touching a bus line.
|
||||
// If the user has overlapped another net line with the endpoint of the bus entry
|
||||
// where the entry connects to a bus, we don't want to short-circuit it.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
bus_connection_ok = !busLine || test_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
else if( test_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
bus_connection_ok = !busLine || connected_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
|
||||
if( connected_item->ConnectionPropagatesTo( test_item ) &&
|
||||
test_item->ConnectionPropagatesTo( connected_item ) &&
|
||||
bus_connection_ok )
|
||||
{
|
||||
connected_set.push_back( test_item );
|
||||
}
|
||||
}
|
||||
|
||||
// If we got this far and did not find a connected bus item for a bus entry,
|
||||
// we should do a manual scan in case there is a bus item on this connection
|
||||
// point but we didn't pick it up earlier because there is *also* a net item here.
|
||||
// Bus entries only connect to bus lines on the end that is touching a bus line.
|
||||
// If the user has overlapped another net line with the endpoint of the bus entry
|
||||
// where the entry connects to a bus, we don't want to short-circuit it.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
bus_connection_ok = !busLine || test_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
else if( test_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
bus_connection_ok = !busLine || connected_item->GetLayer() == LAYER_BUS;
|
||||
}
|
||||
|
||||
if( !bus_entry->m_connected_bus_item )
|
||||
{
|
||||
SCH_SCREEN* screen = aSheet.LastScreen();
|
||||
SCH_LINE* bus = screen->GetBus( it.first );
|
||||
if( connected_item->ConnectionPropagatesTo( test_item ) &&
|
||||
test_item->ConnectionPropagatesTo( connected_item ) &&
|
||||
bus_connection_ok )
|
||||
{
|
||||
connected_set.push_back( test_item );
|
||||
}
|
||||
}
|
||||
|
||||
if( bus )
|
||||
bus_entry->m_connected_bus_item = bus;
|
||||
}
|
||||
// If we got this far and did not find a connected bus item for a bus entry,
|
||||
// we should do a manual scan in case there is a bus item on this connection
|
||||
// point but we didn't pick it up earlier because there is *also* a net item here.
|
||||
if( connected_item->Type() == SCH_BUS_WIRE_ENTRY_T )
|
||||
{
|
||||
auto bus_entry = static_cast<SCH_BUS_WIRE_ENTRY*>( connected_item );
|
||||
|
||||
if( !bus_entry->m_connected_bus_item )
|
||||
{
|
||||
SCH_SCREEN* screen = aSheet.LastScreen();
|
||||
SCH_LINE* bus = screen->GetBus( it.first );
|
||||
|
||||
if( bus )
|
||||
bus_entry->m_connected_bus_item = bus;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
update_lambda();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, update_lambda );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
GetKiCadThreadPool().parallelize_loop( 0, connection_vec.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
update_lambda( connection_vec[ii] );
|
||||
}).wait();
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -832,13 +816,6 @@ void CONNECTION_GRAPH::buildItemSubGraphs()
|
|||
void CONNECTION_GRAPH::resolveAllDrivers()
|
||||
{
|
||||
// Resolve drivers for subgraphs and propagate connectivity info
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 8 nets (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( m_subgraphs.size() + 3 ) / 4 );
|
||||
|
||||
std::atomic<size_t> nextSubgraph( 0 );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
std::vector<CONNECTION_SUBGRAPH*> dirty_graphs;
|
||||
|
||||
std::copy_if( m_subgraphs.begin(), m_subgraphs.end(), std::back_inserter( dirty_graphs ),
|
||||
|
@ -847,61 +824,53 @@ void CONNECTION_GRAPH::resolveAllDrivers()
|
|||
return candidate->m_dirty;
|
||||
} );
|
||||
|
||||
auto update_lambda = [&nextSubgraph, &dirty_graphs]() -> size_t
|
||||
std::vector<std::future<size_t>> returns( dirty_graphs.size() );
|
||||
|
||||
auto update_lambda = []( CONNECTION_SUBGRAPH* subgraph ) -> size_t
|
||||
{
|
||||
for( size_t subgraphId = nextSubgraph++; subgraphId < dirty_graphs.size(); subgraphId = nextSubgraph++ )
|
||||
if( !subgraph->m_dirty )
|
||||
return 0;
|
||||
|
||||
// Special processing for some items
|
||||
for( auto item : subgraph->m_items )
|
||||
{
|
||||
auto subgraph = dirty_graphs[subgraphId];
|
||||
|
||||
if( !subgraph->m_dirty )
|
||||
continue;
|
||||
|
||||
// Special processing for some items
|
||||
for( auto item : subgraph->m_items )
|
||||
switch( item->Type() )
|
||||
{
|
||||
switch( item->Type() )
|
||||
{
|
||||
case SCH_NO_CONNECT_T:
|
||||
case SCH_NO_CONNECT_T:
|
||||
subgraph->m_no_connect = item;
|
||||
break;
|
||||
|
||||
case SCH_BUS_WIRE_ENTRY_T:
|
||||
subgraph->m_bus_entry = item;
|
||||
break;
|
||||
|
||||
case SCH_PIN_T:
|
||||
{
|
||||
auto pin = static_cast<SCH_PIN*>( item );
|
||||
|
||||
if( pin->GetType() == ELECTRICAL_PINTYPE::PT_NC )
|
||||
subgraph->m_no_connect = item;
|
||||
break;
|
||||
|
||||
case SCH_BUS_WIRE_ENTRY_T:
|
||||
subgraph->m_bus_entry = item;
|
||||
break;
|
||||
|
||||
case SCH_PIN_T:
|
||||
{
|
||||
auto pin = static_cast<SCH_PIN*>( item );
|
||||
|
||||
if( pin->GetType() == ELECTRICAL_PINTYPE::PT_NC )
|
||||
subgraph->m_no_connect = item;
|
||||
|
||||
break;
|
||||
}
|
||||
|
||||
default:
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
subgraph->ResolveDrivers( true );
|
||||
subgraph->m_dirty = false;
|
||||
default:
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
subgraph->ResolveDrivers( true );
|
||||
subgraph->m_dirty = false;
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
update_lambda();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, update_lambda );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
GetKiCadThreadPool().parallelize_loop( 0, dirty_graphs.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
update_lambda( dirty_graphs[ii] );
|
||||
}).wait();
|
||||
|
||||
// Now discard any non-driven subgraphs from further consideration
|
||||
|
||||
|
@ -1434,39 +1403,12 @@ void CONNECTION_GRAPH::buildConnectionGraph()
|
|||
for( CONNECTION_SUBGRAPH* subgraph : m_driver_subgraphs )
|
||||
m_sheet_to_subgraphs_map[ subgraph->m_sheet ].emplace_back( subgraph );
|
||||
|
||||
// Update item connections at this point so that neighbor propagation works
|
||||
std::atomic<size_t> nextSubgraph( 0 );
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 8 nets (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( m_subgraphs.size() + 3 ) / 4 );
|
||||
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
|
||||
auto preliminaryUpdateTask =
|
||||
[&]() -> size_t
|
||||
GetKiCadThreadPool().parallelize_loop( 0, m_driver_subgraphs.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( size_t subgraphId = nextSubgraph++;
|
||||
subgraphId < m_driver_subgraphs.size();
|
||||
subgraphId = nextSubgraph++ )
|
||||
{
|
||||
m_driver_subgraphs[subgraphId]->UpdateItemConnections();
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
preliminaryUpdateTask();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, preliminaryUpdateTask );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
m_driver_subgraphs[ii]->UpdateItemConnections();
|
||||
}).wait();
|
||||
|
||||
// Next time through the subgraphs, we do some post-processing to handle things like
|
||||
// connecting bus members to their neighboring subgraphs, and then propagate connections
|
||||
|
@ -1595,84 +1537,71 @@ void CONNECTION_GRAPH::buildConnectionGraph()
|
|||
}
|
||||
}
|
||||
|
||||
nextSubgraph.store( 0 );
|
||||
|
||||
auto updateItemConnectionsTask =
|
||||
[&]() -> size_t
|
||||
[&]( CONNECTION_SUBGRAPH* subgraph ) -> size_t
|
||||
{
|
||||
for( size_t subgraphId = nextSubgraph++;
|
||||
subgraphId < m_driver_subgraphs.size();
|
||||
subgraphId = nextSubgraph++ )
|
||||
// Make sure weakly-driven single-pin nets get the unconnected_ prefix
|
||||
if( !subgraph->m_strong_driver && subgraph->m_drivers.size() == 1 &&
|
||||
subgraph->m_driver->Type() == SCH_PIN_T )
|
||||
{
|
||||
CONNECTION_SUBGRAPH* subgraph = m_driver_subgraphs[subgraphId];
|
||||
SCH_PIN* pin = static_cast<SCH_PIN*>( subgraph->m_driver );
|
||||
wxString name = pin->GetDefaultNetName( subgraph->m_sheet, true );
|
||||
|
||||
// Make sure weakly-driven single-pin nets get the unconnected_ prefix
|
||||
if( !subgraph->m_strong_driver && subgraph->m_drivers.size() == 1 &&
|
||||
subgraph->m_driver->Type() == SCH_PIN_T )
|
||||
subgraph->m_driver_connection->ConfigureFromLabel( name );
|
||||
}
|
||||
|
||||
subgraph->m_dirty = false;
|
||||
subgraph->UpdateItemConnections();
|
||||
|
||||
// No other processing to do on buses
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
return 0;
|
||||
|
||||
// As a visual aid, we can check sheet pins that are driven by themselves to see
|
||||
// if they should be promoted to buses
|
||||
|
||||
if( subgraph->m_driver->Type() == SCH_SHEET_PIN_T )
|
||||
{
|
||||
SCH_SHEET_PIN* pin = static_cast<SCH_SHEET_PIN*>( subgraph->m_driver );
|
||||
|
||||
if( SCH_SHEET* sheet = pin->GetParent() )
|
||||
{
|
||||
SCH_PIN* pin = static_cast<SCH_PIN*>( subgraph->m_driver );
|
||||
wxString name = pin->GetDefaultNetName( subgraph->m_sheet, true );
|
||||
wxString pinText = pin->GetText();
|
||||
SCH_SCREEN* screen = sheet->GetScreen();
|
||||
|
||||
subgraph->m_driver_connection->ConfigureFromLabel( name );
|
||||
}
|
||||
|
||||
subgraph->m_dirty = false;
|
||||
subgraph->UpdateItemConnections();
|
||||
|
||||
// No other processing to do on buses
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
continue;
|
||||
|
||||
// As a visual aid, we can check sheet pins that are driven by themselves to see
|
||||
// if they should be promoted to buses
|
||||
|
||||
if( subgraph->m_driver->Type() == SCH_SHEET_PIN_T )
|
||||
{
|
||||
SCH_SHEET_PIN* pin = static_cast<SCH_SHEET_PIN*>( subgraph->m_driver );
|
||||
|
||||
if( SCH_SHEET* sheet = pin->GetParent() )
|
||||
for( SCH_ITEM* item : screen->Items().OfType( SCH_HIER_LABEL_T ) )
|
||||
{
|
||||
wxString pinText = pin->GetText();
|
||||
SCH_SCREEN* screen = sheet->GetScreen();
|
||||
SCH_HIERLABEL* label = static_cast<SCH_HIERLABEL*>( item );
|
||||
|
||||
for( SCH_ITEM* item : screen->Items().OfType( SCH_HIER_LABEL_T ) )
|
||||
if( label->GetText() == pinText )
|
||||
{
|
||||
SCH_HIERLABEL* label = static_cast<SCH_HIERLABEL*>( item );
|
||||
SCH_SHEET_PATH path = subgraph->m_sheet;
|
||||
path.push_back( sheet );
|
||||
|
||||
if( label->GetText() == pinText )
|
||||
{
|
||||
SCH_SHEET_PATH path = subgraph->m_sheet;
|
||||
path.push_back( sheet );
|
||||
SCH_CONNECTION* parent_conn = label->Connection( &path );
|
||||
|
||||
SCH_CONNECTION* parent_conn = label->Connection( &path );
|
||||
if( parent_conn && parent_conn->IsBus() )
|
||||
subgraph->m_driver_connection->SetType( CONNECTION_TYPE::BUS );
|
||||
|
||||
if( parent_conn && parent_conn->IsBus() )
|
||||
subgraph->m_driver_connection->SetType( CONNECTION_TYPE::BUS );
|
||||
|
||||
break;
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
continue;
|
||||
}
|
||||
|
||||
if( subgraph->m_driver_connection->IsBus() )
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount == 1 )
|
||||
updateItemConnectionsTask();
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, updateItemConnectionsTask );
|
||||
|
||||
// Finalize the threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
GetKiCadThreadPool().parallelize_loop( 0, m_driver_subgraphs.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
updateItemConnectionsTask( m_driver_subgraphs[ii] );
|
||||
}).wait();
|
||||
|
||||
m_net_code_to_subgraphs_map.clear();
|
||||
m_net_name_to_subgraphs_map.clear();
|
||||
|
|
|
@ -120,6 +120,10 @@ target_link_libraries( gerbview
|
|||
${wxWidgets_LIBRARIES}
|
||||
)
|
||||
|
||||
target_include_directories( gerbview PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
if( MAKE_LINK_MAPS )
|
||||
set_target_properties( gerbview PROPERTIES
|
||||
LINK_FLAGS "-Wl,-cref,-Map=gerbview.map" )
|
||||
|
|
|
@ -43,7 +43,6 @@
|
|||
class FP_LIB_TABLE;
|
||||
class FOOTPRINT_LIST;
|
||||
class FOOTPRINT_LIST_IMPL;
|
||||
class FOOTPRINT_ASYNC_LOADER;
|
||||
class PROGRESS_REPORTER;
|
||||
class wxTopLevelWindow;
|
||||
class KIWAY;
|
||||
|
@ -268,27 +267,6 @@ public:
|
|||
*/
|
||||
static FOOTPRINT_LIST* GetInstance( KIWAY& aKiway );
|
||||
|
||||
protected:
|
||||
/**
|
||||
* Launch worker threads to load footprints. Part of the #FOOTPRINT_ASYNC_LOADER
|
||||
* implementation.
|
||||
*/
|
||||
virtual void startWorkers( FP_LIB_TABLE* aTable, const wxString* aNickname,
|
||||
FOOTPRINT_ASYNC_LOADER* aLoader, unsigned aNThreads ) = 0;
|
||||
|
||||
/**
|
||||
* Join worker threads. Part of the FOOTPRINT_ASYNC_LOADER implementation.
|
||||
*/
|
||||
virtual bool joinWorkers() = 0;
|
||||
|
||||
/**
|
||||
* Stop worker threads. Part of the FOOTPRINT_ASYNC_LOADER implementation.
|
||||
*/
|
||||
virtual void stopWorkers() = 0;
|
||||
|
||||
private:
|
||||
friend class FOOTPRINT_ASYNC_LOADER;
|
||||
|
||||
protected:
|
||||
FP_LIB_TABLE* m_lib_table; ///< no ownership
|
||||
|
||||
|
@ -297,75 +275,5 @@ protected:
|
|||
};
|
||||
|
||||
|
||||
/**
|
||||
* Object used to populate a #FOOTPRINT_LIST asynchronously.
|
||||
*
|
||||
* Construct one, calling #Start(), and then waiting until it reports completion. This is
|
||||
* equivalent to calling #FOOTPRINT_LIST::ReadFootprintFiles().
|
||||
*/
|
||||
class APIEXPORT FOOTPRINT_ASYNC_LOADER
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* Construct an asynchronous loader.
|
||||
*/
|
||||
FOOTPRINT_ASYNC_LOADER();
|
||||
|
||||
~FOOTPRINT_ASYNC_LOADER();
|
||||
|
||||
/**
|
||||
* Assign a FOOTPRINT_LIST to the loader. This does not take ownership of
|
||||
* the list.
|
||||
*/
|
||||
void SetList( FOOTPRINT_LIST* aList );
|
||||
|
||||
/**
|
||||
* Launch the worker threads.
|
||||
*
|
||||
* @param aTable defines all the libraries.
|
||||
* @param aNickname is the library to read from, or if NULL means read all footprints from
|
||||
* all known libraries in \a aTable.
|
||||
* @param aNThreads is the number of worker threads.
|
||||
*/
|
||||
void Start( FP_LIB_TABLE* aTable, const wxString* aNickname = nullptr,
|
||||
unsigned aNThreads = DEFAULT_THREADS );
|
||||
|
||||
/**
|
||||
* Wait until the worker threads are finished, and then perform any required
|
||||
* single-threaded finishing on the list. This must be called before using
|
||||
* the list, even if the completion callback was used!
|
||||
*
|
||||
* It is safe to call this method from a thread, but it is not safe to use
|
||||
* the list from ANY thread until it completes. It is recommended to call
|
||||
* this from the main thread because of this.
|
||||
*
|
||||
* It is safe to call this multiple times, but after the first it will
|
||||
* always return true.
|
||||
*
|
||||
* @return true if no errors occurred
|
||||
*/
|
||||
bool Join();
|
||||
|
||||
/**
|
||||
* Safely stop the current process.
|
||||
*/
|
||||
void Abort();
|
||||
|
||||
private:
|
||||
/**
|
||||
* Default number of worker threads. Determined empirically (by dickelbeck):
|
||||
* More than 6 is not significantly faster, less than 6 is likely slower.
|
||||
*/
|
||||
static constexpr unsigned DEFAULT_THREADS = 6;
|
||||
|
||||
friend class FOOTPRINT_LIST;
|
||||
friend class FOOTPRINT_LIST_IMPL;
|
||||
|
||||
FOOTPRINT_LIST* m_list;
|
||||
std::string m_last_table;
|
||||
|
||||
int m_total_libs;
|
||||
};
|
||||
|
||||
|
||||
#endif // FOOTPRINT_INFO_H_
|
||||
|
|
|
@ -176,4 +176,21 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* RAII class to safely set/reset nil KIIDs for use in footprint/symbol loading
|
||||
*/
|
||||
class KIID_NIL_SET_RESET
|
||||
{
|
||||
public:
|
||||
KIID_NIL_SET_RESET()
|
||||
{
|
||||
KIID::CreateNilUuids( true );
|
||||
};
|
||||
|
||||
~KIID_NIL_SET_RESET()
|
||||
{
|
||||
KIID::CreateNilUuids( false );
|
||||
}
|
||||
};
|
||||
|
||||
#endif // KIID_H
|
||||
|
|
|
@ -0,0 +1,42 @@
|
|||
/*
|
||||
* This program source code file is part of KiCad, a free EDA CAD application.
|
||||
*
|
||||
* Copyright (C) 2022 KiCad Developers, see CHANGELOG.TXT for contributors.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU General Public License
|
||||
* as published by the Free Software Foundation; either version 3
|
||||
* of the License, or (at your option) any later version.
|
||||
*
|
||||
* This program is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
* GNU General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU General Public License
|
||||
* along with this program; if not, you may find one here:
|
||||
* http://www.gnu.org/licenses/gpl-3.0.html
|
||||
* or you may search the http://www.gnu.org website for the version 3 license,
|
||||
* or you may write to the Free Software Foundation, Inc.,
|
||||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
*/
|
||||
|
||||
#pragma once
|
||||
#ifndef INCLUDE_THREAD_POOL_H_
|
||||
#define INCLUDE_THREAD_POOL_H_
|
||||
|
||||
#include <bs_thread_pool.hpp>
|
||||
|
||||
using thread_pool = BS::thread_pool;
|
||||
|
||||
/**
|
||||
* Get a reference to the current thread pool. N.B., you cannot copy the thread pool
|
||||
* so if you accidentally write thread_pool tp = GetKiCadThreadPool(), you will break
|
||||
* your compilation
|
||||
*
|
||||
* @return Reference to the current (potentially newly constructed) thread pool
|
||||
*/
|
||||
thread_pool& GetKiCadThreadPool();
|
||||
|
||||
|
||||
#endif /* INCLUDE_THREAD_POOL_H_ */
|
|
@ -94,6 +94,10 @@ endif()
|
|||
|
||||
target_link_libraries( kicad pcm )
|
||||
|
||||
target_include_directories( kicad PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
install( TARGETS kicad
|
||||
DESTINATION ${KICAD_BIN}
|
||||
COMPONENT binary
|
||||
|
|
|
@ -116,6 +116,11 @@ set_target_properties( pl_editor_kiface PROPERTIES
|
|||
PREFIX ${KIFACE_PREFIX}
|
||||
SUFFIX ${KIFACE_SUFFIX}
|
||||
)
|
||||
|
||||
target_include_directories( pl_editor_kiface PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
set_source_files_properties( pl_editor.cpp PROPERTIES
|
||||
# The KIFACE is in pcbnew.cpp, export it:
|
||||
COMPILE_DEFINITIONS "BUILD_KIWAY_DLL;COMPILING_DLL"
|
||||
|
|
|
@ -666,6 +666,10 @@ target_link_libraries( pcbnew_kiface_objects
|
|||
nlohmann_json
|
||||
)
|
||||
|
||||
target_include_directories( pcbnew_kiface_objects PRIVATE
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
)
|
||||
|
||||
if( KICAD_STEP_EXPORT_LIB )
|
||||
add_definitions( -DKICAD_STEP_EXPORT_LIB )
|
||||
endif()
|
||||
|
@ -720,7 +724,7 @@ target_link_libraries( pcbnew_kiface
|
|||
PRIVATE
|
||||
${PCBNEW_KIFACE_LIBRARIES}
|
||||
)
|
||||
|
||||
|
||||
set_source_files_properties( pcbnew.cpp PROPERTIES
|
||||
# The KIFACE is in pcbnew.cpp, export it:
|
||||
COMPILE_DEFINITIONS "BUILD_KIWAY_DLL;COMPILING_DLL"
|
||||
|
|
|
@ -26,37 +26,39 @@
|
|||
*/
|
||||
|
||||
#include <iterator>
|
||||
#include <thread>
|
||||
|
||||
#include <wx/log.h>
|
||||
|
||||
#include <drc/drc_rtree.h>
|
||||
#include <pcb_base_frame.h>
|
||||
#include <board_design_settings.h>
|
||||
#include <reporter.h>
|
||||
#include <board_commit.h>
|
||||
#include <board.h>
|
||||
#include <core/arraydim.h>
|
||||
#include <core/kicad_algo.h>
|
||||
#include <connectivity/connectivity_data.h>
|
||||
#include <convert_shape_list_to_polygon.h>
|
||||
#include <footprint.h>
|
||||
#include <pcb_base_frame.h>
|
||||
#include <pcb_track.h>
|
||||
#include <zone.h>
|
||||
#include <pcb_marker.h>
|
||||
#include <pcb_group.h>
|
||||
#include <pcb_target.h>
|
||||
#include <pcb_shape.h>
|
||||
#include <pcb_text.h>
|
||||
#include <pcb_textbox.h>
|
||||
#include <core/arraydim.h>
|
||||
#include <core/kicad_algo.h>
|
||||
#include <connectivity/connectivity_data.h>
|
||||
#include <string_utils.h>
|
||||
#include <pgm_base.h>
|
||||
#include <pcbnew_settings.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <project.h>
|
||||
#include <project/net_settings.h>
|
||||
#include <project/project_file.h>
|
||||
#include <project/project_local_settings.h>
|
||||
#include <ratsnest/ratsnest_data.h>
|
||||
#include <reporter.h>
|
||||
#include <tool/selection_conditions.h>
|
||||
#include <convert_shape_list_to_polygon.h>
|
||||
#include <wx/log.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <string_utils.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
// This is an odd place for this, but CvPcb won't link if it's in board_item.cpp like I first
|
||||
// tried it.
|
||||
|
@ -639,40 +641,40 @@ void BOARD::CacheTriangulation( PROGRESS_REPORTER* aReporter, const std::vector<
|
|||
if( aReporter )
|
||||
aReporter->Report( _( "Tessellating copper zones..." ) );
|
||||
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> zones_done( 0 );
|
||||
std::atomic<size_t> threads_done( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns;
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[ &zones, &zones_done, &threads_done, &next ]( )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < zones.size(); i = next.fetch_add( 1 ) )
|
||||
{
|
||||
zones[i]->CacheTriangulation();
|
||||
zones_done.fetch_add( 1 );
|
||||
}
|
||||
returns.reserve( zones.size() );
|
||||
|
||||
threads_done.fetch_add( 1 );
|
||||
} );
|
||||
auto cache_zones = [aReporter]( ZONE* aZone ) -> size_t
|
||||
{
|
||||
if( aReporter && aReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
t.detach();
|
||||
}
|
||||
aZone->CacheTriangulation();
|
||||
|
||||
if( aReporter )
|
||||
aReporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
for( ZONE* zone : zones )
|
||||
returns.emplace_back( tp.submit( cache_zones, zone ) );
|
||||
|
||||
// Finalize the triangulation threads
|
||||
while( threads_done < parallelThreadCount )
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
if( aReporter )
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
aReporter->SetCurrentProgress( (double) zones_done / (double) zones.size() );
|
||||
aReporter->KeepRefreshing();
|
||||
}
|
||||
if( aReporter )
|
||||
aReporter->KeepRefreshing();
|
||||
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
|
||||
|
|
|
@ -4,6 +4,7 @@ include_directories(
|
|||
./
|
||||
../
|
||||
../../include
|
||||
$<TARGET_PROPERTY:thread-pool,INTERFACE_INCLUDE_DIRECTORIES>
|
||||
${INC_AFTER}
|
||||
)
|
||||
|
||||
|
@ -18,4 +19,5 @@ add_library( connectivity STATIC ${PCBNEW_CONN_SRCS} )
|
|||
|
||||
target_link_libraries( connectivity PRIVATE
|
||||
common
|
||||
threadpool
|
||||
)
|
|
@ -24,18 +24,19 @@
|
|||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
*/
|
||||
|
||||
|
||||
#include <algorithm>
|
||||
#include <future>
|
||||
#include <mutex>
|
||||
|
||||
#include <connectivity/connectivity_algo.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <geometry/geometry_utils.h>
|
||||
#include <board_commit.h>
|
||||
#include <thread_pool.h>
|
||||
|
||||
#include <wx/log.h>
|
||||
|
||||
#include <thread>
|
||||
#include <mutex>
|
||||
#include <algorithm>
|
||||
#include <future>
|
||||
|
||||
#ifdef PROFILE
|
||||
#include <profile.h>
|
||||
#endif
|
||||
|
@ -221,6 +222,7 @@ void CN_CONNECTIVITY_ALGO::searchConnections()
|
|||
PROF_COUNTER search_basic( "search-basic" );
|
||||
#endif
|
||||
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<CN_ITEM*> dirtyItems;
|
||||
std::copy_if( m_itemList.begin(), m_itemList.end(), std::back_inserter( dirtyItems ),
|
||||
[] ( CN_ITEM* aItem )
|
||||
|
@ -238,57 +240,39 @@ void CN_CONNECTIVITY_ALGO::searchConnections()
|
|||
|
||||
if( m_itemList.IsDirty() )
|
||||
{
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( dirtyItems.size() + 7 ) / 8 );
|
||||
|
||||
std::atomic<size_t> nextItem( 0 );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
std::vector<std::future<size_t>> returns( dirtyItems.size() );
|
||||
|
||||
auto conn_lambda =
|
||||
[&nextItem, &dirtyItems]( CN_LIST* aItemList,
|
||||
[&dirtyItems]( size_t aItem, CN_LIST* aItemList,
|
||||
PROGRESS_REPORTER* aReporter) -> size_t
|
||||
{
|
||||
for( size_t i = nextItem++; i < dirtyItems.size(); i = nextItem++ )
|
||||
{
|
||||
CN_VISITOR visitor( dirtyItems[i] );
|
||||
aItemList->FindNearby( dirtyItems[i], visitor );
|
||||
if( aReporter && aReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
if( aReporter )
|
||||
{
|
||||
if( aReporter->IsCancelled() )
|
||||
break;
|
||||
else
|
||||
aReporter->AdvanceProgress();
|
||||
}
|
||||
}
|
||||
CN_VISITOR visitor( dirtyItems[aItem] );
|
||||
aItemList->FindNearby( dirtyItems[aItem], visitor );
|
||||
|
||||
if( aReporter )
|
||||
aReporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount <= 1 )
|
||||
{
|
||||
conn_lambda( &m_itemList, m_progressReporter );
|
||||
}
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
returns[ii] = std::async( std::launch::async, conn_lambda, &m_itemList,
|
||||
m_progressReporter );
|
||||
}
|
||||
for( size_t ii = 0; ii < dirtyItems.size(); ++ii )
|
||||
returns[ii] = tp.submit( conn_lambda, ii, &m_itemList, m_progressReporter );
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
// Here we balance returns with a 100ms timeout to allow UI updating
|
||||
std::future_status status;
|
||||
do
|
||||
{
|
||||
// Here we balance returns with a 100ms timeout to allow UI updating
|
||||
std::future_status status;
|
||||
do
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->KeepRefreshing();
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->KeepRefreshing();
|
||||
|
||||
status = returns[ii].wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
if( m_progressReporter )
|
||||
|
@ -470,37 +454,37 @@ void CN_CONNECTIVITY_ALGO::Build( BOARD* aBoard, PROGRESS_REPORTER* aReporter )
|
|||
|
||||
// Generate RTrees for CN_ZONE_LAYER items (in parallel)
|
||||
//
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> zitems_done( 0 );
|
||||
std::atomic<size_t> threads_done( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns( zitems.size() );
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
auto cache_zones = [aReporter]( CN_ZONE_LAYER* aZone ) -> size_t
|
||||
{
|
||||
if( aReporter && aReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
aZone->BuildRTree();
|
||||
|
||||
if( aReporter )
|
||||
aReporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
for( size_t ii = 0; ii < zitems.size(); ++ii )
|
||||
returns[ii] = tp.submit( cache_zones, zitems[ii] );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[ &zitems, &zitems_done, &threads_done, &next ]( )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < zitems.size(); i = next.fetch_add( 1 ) )
|
||||
{
|
||||
zitems[i]->BuildRTree();
|
||||
zitems_done.fetch_add( 1 );
|
||||
}
|
||||
std::future_status status;
|
||||
|
||||
threads_done.fetch_add( 1 );
|
||||
} );
|
||||
|
||||
t.detach();
|
||||
}
|
||||
|
||||
while( threads_done < parallelThreadCount )
|
||||
{
|
||||
if( aReporter )
|
||||
do
|
||||
{
|
||||
aReporter->SetCurrentProgress( zitems_done / size );
|
||||
aReporter->KeepRefreshing();
|
||||
}
|
||||
if( aReporter )
|
||||
aReporter->KeepRefreshing();
|
||||
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
}
|
||||
|
||||
// Add CN_ZONE_LAYERS, tracks, and pads to connectivity
|
||||
|
|
|
@ -27,7 +27,6 @@
|
|||
#include <profile.h>
|
||||
#endif
|
||||
|
||||
#include <thread>
|
||||
#include <algorithm>
|
||||
#include <future>
|
||||
#include <initializer_list>
|
||||
|
@ -40,6 +39,7 @@
|
|||
#include <geometry/shape_circle.h>
|
||||
#include <ratsnest/ratsnest_data.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <thread_pool.h>
|
||||
#include <trigo.h>
|
||||
#include <drc/drc_rtree.h>
|
||||
|
||||
|
@ -169,35 +169,12 @@ void CONNECTIVITY_DATA::updateRatsnest()
|
|||
return aNet->IsDirty() && aNet->GetNodeCount() > 0;
|
||||
} );
|
||||
|
||||
// We don't want to spin up a new thread for fewer than 8 nets (overhead costs)
|
||||
size_t parallelThreadCount = std::min<size_t>( std::thread::hardware_concurrency(),
|
||||
( dirty_nets.size() + 7 ) / 8 );
|
||||
|
||||
std::atomic<size_t> nextNet( 0 );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
|
||||
auto update_lambda =
|
||||
[this, &nextNet, &dirty_nets]() -> size_t
|
||||
GetKiCadThreadPool().parallelize_loop( 0, dirty_nets.size(),
|
||||
[&]( const int a, const int b)
|
||||
{
|
||||
for( size_t i = nextNet++; i < dirty_nets.size(); i = nextNet++ )
|
||||
dirty_nets[i]->Update( m_exclusions );
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
if( parallelThreadCount <= 1 )
|
||||
{
|
||||
update_lambda();
|
||||
}
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, update_lambda );
|
||||
|
||||
// Finalize the ratsnest threads
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii].wait();
|
||||
}
|
||||
for( int ii = a; ii < b; ++ii )
|
||||
dirty_nets[ii]->Update( m_exclusions );
|
||||
}).wait();
|
||||
|
||||
#ifdef PROFILE
|
||||
rnUpdate.Show();
|
||||
|
|
|
@ -22,10 +22,9 @@
|
|||
*/
|
||||
|
||||
#include <common.h>
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <board_design_settings.h>
|
||||
#include <footprint.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
#include <drc/drc_engine.h>
|
||||
|
@ -149,58 +148,60 @@ bool DRC_CACHE_GENERATOR::Run()
|
|||
|
||||
// Cache zone bounding boxes, triangulation, copper zone rtrees, and footprint courtyards
|
||||
// before we start.
|
||||
//
|
||||
|
||||
m_drcEngine->SetMaxProgress( allZones.size() );
|
||||
|
||||
for( FOOTPRINT* footprint : m_board->Footprints() )
|
||||
footprint->BuildPolyCourtyards();
|
||||
|
||||
count = allZones.size();
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> done( 0 );
|
||||
std::atomic<size_t> threads_finished( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
|
||||
for( ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[ this, &allZones, &done, &threads_finished, &next, count ]( )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < count; i = next.fetch_add( 1 ) )
|
||||
{
|
||||
ZONE* zone = allZones[ i ];
|
||||
for( ZONE* zone : footprint->Zones() )
|
||||
allZones.push_back( zone );
|
||||
|
||||
zone->CacheBoundingBox();
|
||||
zone->CacheTriangulation();
|
||||
|
||||
if( !zone->GetIsRuleArea() && zone->IsOnCopperLayer() )
|
||||
{
|
||||
std::unique_ptr<DRC_RTREE> rtree = std::make_unique<DRC_RTREE>();
|
||||
|
||||
for( PCB_LAYER_ID layer : zone->GetLayerSet().Seq() )
|
||||
{
|
||||
if( IsCopperLayer( layer ) )
|
||||
rtree->Insert( zone, layer );
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> cacheLock( m_board->m_CachesMutex );
|
||||
m_board->m_CopperZoneRTreeCache[ zone ] = std::move( rtree );
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
break;
|
||||
|
||||
done.fetch_add( 1 );
|
||||
}
|
||||
|
||||
threads_finished.fetch_add( 1 );
|
||||
} );
|
||||
|
||||
t.detach();
|
||||
footprint->BuildPolyCourtyards();
|
||||
}
|
||||
|
||||
while( threads_finished < parallelThreadCount )
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns;
|
||||
|
||||
returns.reserve( allZones.size() );
|
||||
|
||||
auto cache_zones = [this]( ZONE* aZone ) -> size_t
|
||||
{
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
aZone->CacheBoundingBox();
|
||||
aZone->CacheTriangulation();
|
||||
|
||||
if( !aZone->GetIsRuleArea() && aZone->IsOnCopperLayer() )
|
||||
{
|
||||
std::unique_ptr<DRC_RTREE> rtree = std::make_unique<DRC_RTREE>();
|
||||
|
||||
for( PCB_LAYER_ID layer : aZone->GetLayerSet().Seq() )
|
||||
{
|
||||
if( IsCopperLayer( layer ) )
|
||||
rtree->Insert( aZone, layer );
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> cacheLock( m_board->m_CachesMutex );
|
||||
m_board->m_CopperZoneRTreeCache[ aZone ] = std::move( rtree );
|
||||
m_drcEngine->AdvanceProgress();
|
||||
}
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
for( ZONE* zone : allZones )
|
||||
returns.emplace_back( tp.submit( cache_zones, zone ) );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
reportProgress( done, count, 1 );
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
m_drcEngine->KeepRefreshing();
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
return !m_drcEngine->IsCancelled();
|
||||
|
|
|
@ -24,7 +24,6 @@
|
|||
*/
|
||||
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <reporter.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <string_utils.h>
|
||||
|
@ -40,6 +39,7 @@
|
|||
#include <footprint.h>
|
||||
#include <pad.h>
|
||||
#include <pcb_track.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
|
||||
|
@ -1514,6 +1514,29 @@ void DRC_ENGINE::ReportAux ( const wxString& aStr )
|
|||
}
|
||||
|
||||
|
||||
bool DRC_ENGINE::KeepRefreshing( bool aWait )
|
||||
{
|
||||
if( !m_progressReporter )
|
||||
return true;
|
||||
|
||||
return m_progressReporter->KeepRefreshing( aWait );
|
||||
}
|
||||
|
||||
|
||||
void DRC_ENGINE::AdvanceProgress()
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->AdvanceProgress();
|
||||
}
|
||||
|
||||
|
||||
void DRC_ENGINE::SetMaxProgress( int aSize )
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->SetMaxProgress( aSize );
|
||||
}
|
||||
|
||||
|
||||
bool DRC_ENGINE::ReportProgress( double aProgress )
|
||||
{
|
||||
if( !m_progressReporter )
|
||||
|
|
|
@ -167,6 +167,9 @@ public:
|
|||
void ReportViolation( const std::shared_ptr<DRC_ITEM>& aItem, const VECTOR2I& aPos,
|
||||
PCB_LAYER_ID aMarkerLayer );
|
||||
|
||||
bool KeepRefreshing( bool aWait = false );
|
||||
void AdvanceProgress();
|
||||
void SetMaxProgress( int aSize );
|
||||
bool ReportProgress( double aProgress );
|
||||
bool ReportPhase( const wxString& aMessage );
|
||||
void ReportAux( const wxString& aStr );
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
*/
|
||||
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <common.h>
|
||||
#include <board_design_settings.h>
|
||||
#include <drc/drc_rtree.h>
|
||||
|
@ -31,6 +30,8 @@
|
|||
#include <drc/drc_rule.h>
|
||||
#include <drc/drc_test_provider.h>
|
||||
#include <pad.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <thread_pool.h>
|
||||
#include <zone.h>
|
||||
|
||||
|
||||
|
@ -106,86 +107,92 @@ bool DRC_TEST_PROVIDER_DISALLOW::Run()
|
|||
}
|
||||
}
|
||||
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> done( 0 );
|
||||
std::atomic<size_t> threads_finished( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
PROGRESS_REPORTER* reporter = m_drcEngine->GetProgressReporter();
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[&]()
|
||||
auto query_areas = [&]( std::pair<ZONE*, ZONE*> aCache ) -> size_t
|
||||
{
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
ZONE* ruleArea = aCache.first;
|
||||
ZONE* copperZone = aCache.second;
|
||||
EDA_RECT areaBBox = ruleArea->GetCachedBoundingBox();
|
||||
EDA_RECT copperBBox = copperZone->GetCachedBoundingBox();
|
||||
bool isInside = false;
|
||||
|
||||
if( copperZone->IsFilled() && areaBBox.Intersects( copperBBox ) )
|
||||
{
|
||||
for( size_t i = next.fetch_add( 1 ); i < toCache.size(); i = next.fetch_add( 1 ) )
|
||||
// Collisions include touching, so we need to deflate outline by
|
||||
// enough to exclude it. This is particularly important for detecting
|
||||
// copper fills as they will be exactly touching along the entire
|
||||
// exclusion border.
|
||||
SHAPE_POLY_SET areaPoly = ruleArea->Outline()->CloneDropTriangulation();
|
||||
areaPoly.Deflate( epsilon, 0, SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );
|
||||
|
||||
DRC_RTREE* zoneRTree = board->m_CopperZoneRTreeCache[ copperZone ].get();
|
||||
|
||||
if( zoneRTree )
|
||||
{
|
||||
ZONE* ruleArea = toCache[i].first;
|
||||
ZONE* copperZone = toCache[i].second;
|
||||
EDA_RECT areaBBox = ruleArea->GetCachedBoundingBox();
|
||||
EDA_RECT copperBBox = copperZone->GetCachedBoundingBox();
|
||||
bool isInside = false;
|
||||
|
||||
if( copperZone->IsFilled() && areaBBox.Intersects( copperBBox ) )
|
||||
for( PCB_LAYER_ID layer : ruleArea->GetLayerSet().Seq() )
|
||||
{
|
||||
// Collisions include touching, so we need to deflate outline by
|
||||
// enough to exclude it. This is particularly important for detecting
|
||||
// copper fills as they will be exactly touching along the entire
|
||||
// exclusion border.
|
||||
SHAPE_POLY_SET areaPoly = ruleArea->Outline()->CloneDropTriangulation();
|
||||
areaPoly.Deflate( epsilon, 0, SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );
|
||||
|
||||
DRC_RTREE* zoneRTree = board->m_CopperZoneRTreeCache[ copperZone ].get();
|
||||
|
||||
if( zoneRTree )
|
||||
if( zoneRTree->QueryColliding( areaBBox, &areaPoly, layer ) )
|
||||
{
|
||||
for( PCB_LAYER_ID layer : ruleArea->GetLayerSet().Seq() )
|
||||
{
|
||||
if( zoneRTree->QueryColliding( areaBBox, &areaPoly, layer ) )
|
||||
{
|
||||
isInside = true;
|
||||
break;
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
break;
|
||||
}
|
||||
isInside = true;
|
||||
return 0;
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
break;
|
||||
|
||||
std::tuple<BOARD_ITEM*, BOARD_ITEM*, PCB_LAYER_ID> key( ruleArea,
|
||||
copperZone,
|
||||
UNDEFINED_LAYER );
|
||||
|
||||
{
|
||||
std::unique_lock<std::mutex> cacheLock( board->m_CachesMutex );
|
||||
board->m_InsideAreaCache[ key ] = isInside;
|
||||
}
|
||||
|
||||
done.fetch_add( 1 );
|
||||
}
|
||||
}
|
||||
|
||||
threads_finished.fetch_add( 1 );
|
||||
} );
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
t.detach();
|
||||
}
|
||||
std::tuple<BOARD_ITEM*, BOARD_ITEM*, PCB_LAYER_ID> key( ruleArea,
|
||||
copperZone,
|
||||
UNDEFINED_LAYER );
|
||||
|
||||
while( threads_finished < parallelThreadCount )
|
||||
{
|
||||
std::unique_lock<std::mutex> cacheLock( board->m_CachesMutex );
|
||||
board->m_InsideAreaCache[ key ] = isInside;
|
||||
}
|
||||
|
||||
m_drcEngine->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
};
|
||||
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns;
|
||||
|
||||
returns.reserve( toCache.size() );
|
||||
|
||||
for( auto& cache : toCache )
|
||||
returns.emplace_back( tp.submit( query_areas, cache ) );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
m_drcEngine->ReportProgress( (double) done / (double) totalCount );
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
if( reporter )
|
||||
reporter->KeepRefreshing();
|
||||
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return false;
|
||||
|
||||
// Now go through all the board objects calling the DRC_ENGINE to run the actual dissallow
|
||||
// Now go through all the board objects calling the DRC_ENGINE to run the actual disallow
|
||||
// tests. These should be reasonably quick using the caches generated above.
|
||||
//
|
||||
int delta = 100;
|
||||
int ii = done;
|
||||
int ii = static_cast<int>( toCache.size() );
|
||||
|
||||
auto checkTextOnEdgeCuts =
|
||||
[&]( BOARD_ITEM* item )
|
||||
|
|
|
@ -22,7 +22,6 @@
|
|||
*/
|
||||
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <board.h>
|
||||
#include <board_design_settings.h>
|
||||
#include <zone.h>
|
||||
|
@ -33,6 +32,8 @@
|
|||
#include <drc/drc_item.h>
|
||||
#include <drc/drc_test_provider.h>
|
||||
#include <advanced_config.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <thread_pool.h>
|
||||
|
||||
/*
|
||||
Checks for slivers in copper layers
|
||||
|
@ -92,6 +93,7 @@ bool DRC_TEST_PROVIDER_SLIVER_CHECKER::Run()
|
|||
|
||||
// Report progress on board zones only. Everything else is in the noise.
|
||||
int zoneLayerCount = 0;
|
||||
std::atomic<size_t> done( 1 );
|
||||
|
||||
for( PCB_LAYER_ID layer : copperLayers )
|
||||
{
|
||||
|
@ -102,86 +104,91 @@ bool DRC_TEST_PROVIDER_SLIVER_CHECKER::Run()
|
|||
}
|
||||
}
|
||||
|
||||
// The first completion may be a long time coming, so this one gets us started.
|
||||
zoneLayerCount++;
|
||||
PROGRESS_REPORTER* reporter = m_drcEngine->GetProgressReporter();
|
||||
|
||||
if( !m_drcEngine->ReportProgress( 1.0 / (double) zoneLayerCount ) )
|
||||
if( reporter && reporter->IsCancelled() )
|
||||
return false; // DRC cancelled
|
||||
|
||||
std::vector<SHAPE_POLY_SET> layerPolys;
|
||||
layerPolys.resize( layerCount );
|
||||
std::vector<SHAPE_POLY_SET> layerPolys( layerCount );
|
||||
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> done( 1 );
|
||||
std::atomic<size_t> threads_finished( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[&]( )
|
||||
auto sliver_checker =
|
||||
[&]( int aItem ) -> size_t
|
||||
{
|
||||
for( int i = next.fetch_add( 1 ); i < layerCount; i = next.fetch_add( 1 ) )
|
||||
{
|
||||
PCB_LAYER_ID layer = copperLayers[i];
|
||||
SHAPE_POLY_SET& poly = layerPolys[i];
|
||||
SHAPE_POLY_SET fill;
|
||||
PCB_LAYER_ID layer = copperLayers[aItem];
|
||||
SHAPE_POLY_SET& poly = layerPolys[aItem];
|
||||
|
||||
forEachGeometryItem( s_allBasicItems, LSET().set( layer ),
|
||||
[&]( BOARD_ITEM* item ) -> bool
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
SHAPE_POLY_SET fill;
|
||||
|
||||
forEachGeometryItem( s_allBasicItems, LSET().set( layer ),
|
||||
[&]( BOARD_ITEM* item ) -> bool
|
||||
{
|
||||
if( dynamic_cast<ZONE*>( item) )
|
||||
{
|
||||
if( dynamic_cast<ZONE*>( item) )
|
||||
ZONE* zone = static_cast<ZONE*>( item );
|
||||
|
||||
if( !zone->GetIsRuleArea() )
|
||||
{
|
||||
ZONE* zone = static_cast<ZONE*>( item );
|
||||
fill = zone->GetFill( layer )->CloneDropTriangulation();
|
||||
fill.Unfracture( SHAPE_POLY_SET::PM_FAST );
|
||||
|
||||
if( !zone->GetIsRuleArea() )
|
||||
{
|
||||
fill = zone->GetFill( layer )->CloneDropTriangulation();
|
||||
fill.Unfracture( SHAPE_POLY_SET::PM_FAST );
|
||||
for( int jj = 0; jj < fill.OutlineCount(); ++jj )
|
||||
poly.AddOutline( fill.Outline( jj ) );
|
||||
|
||||
for( int jj = 0; jj < fill.OutlineCount(); ++jj )
|
||||
poly.AddOutline( fill.Outline( jj ) );
|
||||
|
||||
// Report progress on board zones only. Everything
|
||||
// else is in the noise.
|
||||
done.fetch_add( 1 );
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
item->TransformShapeWithClearanceToPolygon( poly, layer, 0,
|
||||
ARC_LOW_DEF,
|
||||
ERROR_OUTSIDE );
|
||||
// Report progress on board zones only. Everything
|
||||
// else is in the noise.
|
||||
done.fetch_add( 1 );
|
||||
}
|
||||
}
|
||||
else
|
||||
{
|
||||
item->TransformShapeWithClearanceToPolygon( poly, layer, 0,
|
||||
ARC_LOW_DEF,
|
||||
ERROR_OUTSIDE );
|
||||
}
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return false;
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return false;
|
||||
|
||||
return true;
|
||||
} );
|
||||
return true;
|
||||
} );
|
||||
|
||||
poly.Simplify( SHAPE_POLY_SET::PM_FAST );
|
||||
|
||||
// Sharpen corners
|
||||
poly.Deflate( widthTolerance / 2, ARC_LOW_DEF,
|
||||
SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
break;
|
||||
}
|
||||
poly.Simplify( SHAPE_POLY_SET::PM_FAST );
|
||||
|
||||
threads_finished.fetch_add( 1 );
|
||||
} );
|
||||
// Sharpen corners
|
||||
poly.Deflate( widthTolerance / 2, ARC_LOW_DEF,
|
||||
SHAPE_POLY_SET::ALLOW_ACUTE_CORNERS );
|
||||
|
||||
t.detach();
|
||||
}
|
||||
return 1;
|
||||
};
|
||||
|
||||
while( threads_finished < parallelThreadCount )
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<size_t>> returns;
|
||||
|
||||
returns.reserve( copperLayers.size() );
|
||||
|
||||
for( size_t ii = 0; ii < copperLayers.size(); ++ii )
|
||||
returns.emplace_back( tp.submit( sliver_checker, ii ) );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
m_drcEngine->ReportProgress( (double) done / (double) zoneLayerCount );
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
m_drcEngine->ReportProgress( static_cast<double>( zoneLayerCount ) / done );
|
||||
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
|
||||
for( int ii = 0; ii < layerCount; ++ii )
|
||||
{
|
||||
PCB_LAYER_ID layer = copperLayers[ii];
|
||||
|
|
|
@ -21,8 +21,6 @@
|
|||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include <atomic>
|
||||
#include <thread>
|
||||
#include <board.h>
|
||||
#include <board_design_settings.h>
|
||||
#include <connectivity/connectivity_data.h>
|
||||
|
@ -30,6 +28,8 @@
|
|||
#include <footprint.h>
|
||||
#include <pad.h>
|
||||
#include <pcb_track.h>
|
||||
#include <thread_pool.h>
|
||||
|
||||
#include <geometry/shape_line_chain.h>
|
||||
#include <geometry/shape_poly_set.h>
|
||||
#include <drc/drc_rule.h>
|
||||
|
@ -193,36 +193,33 @@ bool DRC_TEST_PROVIDER_ZONE_CONNECTIONS::Run()
|
|||
zoneLayers.push_back( { zone, layer } );
|
||||
}
|
||||
|
||||
int zoneLayerCount = zoneLayers.size();
|
||||
std::atomic<size_t> next( 0 );
|
||||
std::atomic<size_t> done( 1 );
|
||||
std::atomic<size_t> threads_finished( 0 );
|
||||
size_t parallelThreadCount = std::max<size_t>( std::thread::hardware_concurrency(), 2 );
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
std::vector<std::future<int>> returns;
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns.reserve( zoneLayers.size() );
|
||||
|
||||
for( auto& zonelayer : zoneLayers )
|
||||
{
|
||||
std::thread t = std::thread(
|
||||
[&]( )
|
||||
returns.emplace_back( tp.submit([&]( ZONE* aZone, PCB_LAYER_ID aLayer ) -> int
|
||||
{
|
||||
for( int i = next.fetch_add( 1 ); i < zoneLayerCount; i = next.fetch_add( 1 ) )
|
||||
if( !m_drcEngine->IsCancelled() )
|
||||
{
|
||||
testZoneLayer( zoneLayers[i].first, zoneLayers[i].second );
|
||||
done.fetch_add( 1 );
|
||||
|
||||
if( m_drcEngine->IsCancelled() )
|
||||
break;
|
||||
testZoneLayer( aZone, aLayer );
|
||||
m_drcEngine->AdvanceProgress();
|
||||
}
|
||||
|
||||
threads_finished.fetch_add( 1 );
|
||||
} );
|
||||
|
||||
t.detach();
|
||||
return 0;
|
||||
}, zonelayer.first, zonelayer.second ) );
|
||||
}
|
||||
|
||||
while( threads_finished < parallelThreadCount )
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
m_drcEngine->ReportProgress( (double) done / (double) zoneLayerCount );
|
||||
std::this_thread::sleep_for( std::chrono::milliseconds( 100 ) );
|
||||
std::future_status status;
|
||||
|
||||
do
|
||||
{
|
||||
m_drcEngine->KeepRefreshing();
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
return !m_drcEngine->IsCancelled();
|
||||
|
|
|
@ -22,22 +22,22 @@
|
|||
|
||||
#include <footprint_info_impl.h>
|
||||
|
||||
#include <dialogs/html_message_box.h>
|
||||
#include <footprint.h>
|
||||
#include <footprint_info.h>
|
||||
#include <fp_lib_table.h>
|
||||
#include <dialogs/html_message_box.h>
|
||||
#include <string_utils.h>
|
||||
#include <locale_io.h>
|
||||
#include <kiway.h>
|
||||
#include <locale_io.h>
|
||||
#include <lib_id.h>
|
||||
#include <wildcards_and_files_ext.h>
|
||||
#include <progress_reporter.h>
|
||||
#include <string_utils.h>
|
||||
#include <thread_pool.h>
|
||||
#include <wildcards_and_files_ext.h>
|
||||
|
||||
#include <wx/textfile.h>
|
||||
#include <wx/txtstrm.h>
|
||||
#include <wx/wfstream.h>
|
||||
|
||||
#include <thread>
|
||||
|
||||
|
||||
void FOOTPRINT_INFO_IMPL::load()
|
||||
{
|
||||
|
@ -95,26 +95,6 @@ bool FOOTPRINT_LIST_IMPL::CatchErrors( const std::function<void()>& aFunc )
|
|||
}
|
||||
|
||||
|
||||
void FOOTPRINT_LIST_IMPL::loader_job()
|
||||
{
|
||||
wxString nickname;
|
||||
|
||||
while( m_queue_in.pop( nickname ) && !m_cancelled )
|
||||
{
|
||||
CatchErrors( [this, &nickname]()
|
||||
{
|
||||
m_lib_table->PrefetchLib( nickname );
|
||||
m_queue_out.push( nickname );
|
||||
} );
|
||||
|
||||
m_count_finished.fetch_add( 1 );
|
||||
|
||||
if( m_progress_reporter )
|
||||
m_progress_reporter->AdvanceProgress();
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxString* aNickname,
|
||||
PROGRESS_REPORTER* aProgressReporter )
|
||||
{
|
||||
|
@ -123,6 +103,9 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
|
|||
if( generatedTimestamp == m_list_timestamp )
|
||||
return true;
|
||||
|
||||
// Disable KIID generation: not needed for library parts; sometimes very slow
|
||||
KIID_NIL_SET_RESET reset_kiid;
|
||||
|
||||
m_progress_reporter = aProgressReporter;
|
||||
|
||||
if( m_progress_reporter )
|
||||
|
@ -132,25 +115,28 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
|
|||
}
|
||||
|
||||
m_cancelled = false;
|
||||
m_lib_table = aTable;
|
||||
|
||||
FOOTPRINT_ASYNC_LOADER loader;
|
||||
// Clear data before reading files
|
||||
m_errors.clear();
|
||||
m_list.clear();
|
||||
m_queue_in.clear();
|
||||
m_queue_out.clear();
|
||||
|
||||
loader.SetList( this );
|
||||
loader.Start( aTable, aNickname );
|
||||
|
||||
while( !m_cancelled && (int)m_count_finished.load() < m_loader->m_total_libs )
|
||||
if( aNickname )
|
||||
{
|
||||
if( m_progress_reporter && !m_progress_reporter->KeepRefreshing() )
|
||||
m_cancelled = true;
|
||||
|
||||
wxMilliSleep( 33 /* 30 FPS refresh rate */);
|
||||
}
|
||||
|
||||
if( m_cancelled )
|
||||
{
|
||||
loader.Abort();
|
||||
m_queue_in.push( *aNickname );
|
||||
}
|
||||
else
|
||||
{
|
||||
for( auto const& nickname : aTable->GetLogicalLibs() )
|
||||
m_queue_in.push( nickname );
|
||||
}
|
||||
|
||||
|
||||
loadLibs();
|
||||
|
||||
if( !m_cancelled )
|
||||
{
|
||||
if( m_progress_reporter )
|
||||
{
|
||||
|
@ -159,7 +145,7 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
|
|||
m_progress_reporter->Report( _( "Loading footprints..." ) );
|
||||
}
|
||||
|
||||
loader.Join();
|
||||
loadFootprints();
|
||||
|
||||
if( m_progress_reporter )
|
||||
m_progress_reporter->AdvancePhase();
|
||||
|
@ -174,75 +160,54 @@ bool FOOTPRINT_LIST_IMPL::ReadFootprintFiles( FP_LIB_TABLE* aTable, const wxStri
|
|||
}
|
||||
|
||||
|
||||
void FOOTPRINT_LIST_IMPL::startWorkers( FP_LIB_TABLE* aTable, wxString const* aNickname,
|
||||
FOOTPRINT_ASYNC_LOADER* aLoader, unsigned aNThreads )
|
||||
void FOOTPRINT_LIST_IMPL::loadLibs()
|
||||
{
|
||||
m_loader = aLoader;
|
||||
m_lib_table = aTable;
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
size_t num_returns = m_queue_in.size();
|
||||
std::vector<std::future<size_t>> returns( num_returns );
|
||||
|
||||
// Clear data before reading files
|
||||
m_count_finished.store( 0 );
|
||||
m_errors.clear();
|
||||
m_list.clear();
|
||||
m_threads.clear();
|
||||
m_queue_in.clear();
|
||||
m_queue_out.clear();
|
||||
auto loader_job = [this]() -> size_t
|
||||
{
|
||||
wxString nickname;
|
||||
size_t retval = 0;
|
||||
|
||||
if( aNickname )
|
||||
if( !m_cancelled && m_queue_in.pop( nickname ) )
|
||||
{
|
||||
if( CatchErrors( [this, &nickname]()
|
||||
{
|
||||
m_lib_table->PrefetchLib( nickname );
|
||||
m_queue_out.push( nickname );
|
||||
} ) && m_progress_reporter )
|
||||
{
|
||||
m_progress_reporter->AdvanceProgress();
|
||||
}
|
||||
|
||||
++retval;
|
||||
}
|
||||
|
||||
return retval;
|
||||
};
|
||||
|
||||
for( size_t ii = 0; ii < num_returns; ++ii )
|
||||
returns[ii] = tp.submit( loader_job );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
m_queue_in.push( *aNickname );
|
||||
}
|
||||
else
|
||||
{
|
||||
for( auto const& nickname : aTable->GetLogicalLibs() )
|
||||
m_queue_in.push( nickname );
|
||||
}
|
||||
std::future_status status;
|
||||
|
||||
m_loader->m_total_libs = m_queue_in.size();
|
||||
do
|
||||
{
|
||||
if( m_progress_reporter && !m_progress_reporter->KeepRefreshing() )
|
||||
m_cancelled = true;
|
||||
|
||||
for( unsigned i = 0; i < aNThreads; ++i )
|
||||
{
|
||||
m_threads.emplace_back( &FOOTPRINT_LIST_IMPL::loader_job, this );
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_LIST_IMPL::stopWorkers()
|
||||
void FOOTPRINT_LIST_IMPL::loadFootprints()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock1( m_join );
|
||||
|
||||
// To safely stop our workers, we set the cancellation flag (they will each
|
||||
// exit on their next safe loop location when this is set). Then we need to wait
|
||||
// for all threads to finish as closing the implementation will free the queues
|
||||
// that the threads write to.
|
||||
for( auto& i : m_threads )
|
||||
i.join();
|
||||
|
||||
m_threads.clear();
|
||||
m_queue_in.clear();
|
||||
m_count_finished.store( 0 );
|
||||
|
||||
// If we have canceled in the middle of a load, clear our timestamp to re-load next time
|
||||
if( m_cancelled )
|
||||
m_list_timestamp = 0;
|
||||
}
|
||||
|
||||
|
||||
bool FOOTPRINT_LIST_IMPL::joinWorkers()
|
||||
{
|
||||
{
|
||||
std::lock_guard<std::mutex> lock1( m_join );
|
||||
|
||||
for( auto& i : m_threads )
|
||||
i.join();
|
||||
|
||||
m_threads.clear();
|
||||
m_queue_in.clear();
|
||||
m_count_finished.store( 0 );
|
||||
}
|
||||
|
||||
size_t total_count = m_queue_out.size();
|
||||
|
||||
LOCALE_IO toggle_locale;
|
||||
|
||||
// Parse the footprints in parallel. WARNING! This requires changing the locale, which is
|
||||
|
@ -253,85 +218,58 @@ bool FOOTPRINT_LIST_IMPL::joinWorkers()
|
|||
// TODO: blast LOCALE_IO into the sun
|
||||
|
||||
SYNC_QUEUE<std::unique_ptr<FOOTPRINT_INFO>> queue_parsed;
|
||||
std::vector<std::thread> threads;
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
size_t num_elements = m_queue_out.size();
|
||||
std::vector<std::future<size_t>> returns( num_elements );
|
||||
|
||||
for( size_t ii = 0; ii < std::thread::hardware_concurrency() + 1; ++ii )
|
||||
auto fp_thread = [ this, &queue_parsed ]() -> size_t
|
||||
{
|
||||
threads.emplace_back( [this, &queue_parsed]() {
|
||||
wxString nickname;
|
||||
wxString nickname;
|
||||
|
||||
while( m_queue_out.pop( nickname ) && !m_cancelled )
|
||||
if( m_queue_out.pop( nickname ) && !m_cancelled )
|
||||
{
|
||||
wxArrayString fpnames;
|
||||
|
||||
if( !CatchErrors( [&]()
|
||||
{ m_lib_table->FootprintEnumerate( fpnames, nickname, false ); } ) )
|
||||
{
|
||||
wxArrayString fpnames;
|
||||
|
||||
try
|
||||
{
|
||||
m_lib_table->FootprintEnumerate( fpnames, nickname, false );
|
||||
}
|
||||
catch( const IO_ERROR& ioe )
|
||||
{
|
||||
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
|
||||
}
|
||||
catch( const std::exception& se )
|
||||
{
|
||||
// This is a round about way to do this, but who knows what THROW_IO_ERROR()
|
||||
// may be tricked out to do someday, keep it in the game.
|
||||
try
|
||||
{
|
||||
THROW_IO_ERROR( se.what() );
|
||||
}
|
||||
catch( const IO_ERROR& ioe )
|
||||
{
|
||||
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
|
||||
}
|
||||
}
|
||||
|
||||
for( unsigned jj = 0; jj < fpnames.size() && !m_cancelled; ++jj )
|
||||
{
|
||||
try
|
||||
{
|
||||
wxString fpname = fpnames[jj];
|
||||
FOOTPRINT_INFO* fpinfo = new FOOTPRINT_INFO_IMPL( this, nickname, fpname );
|
||||
queue_parsed.move_push( std::unique_ptr<FOOTPRINT_INFO>( fpinfo ) );
|
||||
}
|
||||
catch( const IO_ERROR& ioe )
|
||||
{
|
||||
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
|
||||
}
|
||||
catch( const std::exception& se )
|
||||
{
|
||||
// This is a round about way to do this, but who knows what THROW_IO_ERROR()
|
||||
// may be tricked out to do someday, keep it in the game.
|
||||
try
|
||||
{
|
||||
THROW_IO_ERROR( se.what() );
|
||||
}
|
||||
catch( const IO_ERROR& ioe )
|
||||
{
|
||||
m_errors.move_push( std::make_unique<IO_ERROR>( ioe ) );
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if( m_progress_reporter )
|
||||
m_progress_reporter->AdvanceProgress();
|
||||
|
||||
m_count_finished.fetch_add( 1 );
|
||||
return 0;
|
||||
}
|
||||
} );
|
||||
}
|
||||
|
||||
while( !m_cancelled && (size_t)m_count_finished.load() < total_count )
|
||||
for( unsigned jj = 0; jj < fpnames.size() && !m_cancelled; ++jj )
|
||||
{
|
||||
CatchErrors( [&]()
|
||||
{
|
||||
FOOTPRINT_INFO* fpinfo = new FOOTPRINT_INFO_IMPL( this, nickname, fpnames[jj] );
|
||||
queue_parsed.move_push( std::unique_ptr<FOOTPRINT_INFO>( fpinfo ) );
|
||||
});
|
||||
}
|
||||
|
||||
if( m_progress_reporter )
|
||||
m_progress_reporter->AdvanceProgress();
|
||||
|
||||
return 1;
|
||||
}
|
||||
|
||||
return 0;
|
||||
};
|
||||
|
||||
for( size_t ii = 0; ii < num_elements; ++ii )
|
||||
returns[ii] = tp.submit( fp_thread );
|
||||
|
||||
for( auto& retval : returns )
|
||||
{
|
||||
if( m_progress_reporter && !m_progress_reporter->KeepRefreshing() )
|
||||
m_cancelled = true;
|
||||
std::future_status status;
|
||||
|
||||
wxMilliSleep( 33 /* 30 FPS refresh rate */ );
|
||||
do
|
||||
{
|
||||
if( m_progress_reporter )
|
||||
m_progress_reporter->KeepRefreshing();
|
||||
|
||||
status = retval.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
for( auto& thr : threads )
|
||||
thr.join();
|
||||
|
||||
std::unique_ptr<FOOTPRINT_INFO> fpi;
|
||||
|
||||
while( queue_parsed.pop( fpi ) )
|
||||
|
@ -343,14 +281,10 @@ bool FOOTPRINT_LIST_IMPL::joinWorkers()
|
|||
{
|
||||
return *lhs < *rhs;
|
||||
} );
|
||||
|
||||
return m_errors.empty();
|
||||
}
|
||||
|
||||
|
||||
FOOTPRINT_LIST_IMPL::FOOTPRINT_LIST_IMPL() :
|
||||
m_loader( nullptr ),
|
||||
m_count_finished( 0 ),
|
||||
m_list_timestamp( 0 ),
|
||||
m_progress_reporter( nullptr ),
|
||||
m_cancelled( false )
|
||||
|
@ -358,12 +292,6 @@ FOOTPRINT_LIST_IMPL::FOOTPRINT_LIST_IMPL() :
|
|||
}
|
||||
|
||||
|
||||
FOOTPRINT_LIST_IMPL::~FOOTPRINT_LIST_IMPL()
|
||||
{
|
||||
stopWorkers();
|
||||
}
|
||||
|
||||
|
||||
void FOOTPRINT_LIST_IMPL::WriteCacheToFile( const wxString& aFilePath )
|
||||
{
|
||||
wxFileName tmpFileName = wxFileName::CreateTempFileName( aFilePath );
|
||||
|
|
|
@ -85,7 +85,7 @@ class FOOTPRINT_LIST_IMPL : public FOOTPRINT_LIST
|
|||
{
|
||||
public:
|
||||
FOOTPRINT_LIST_IMPL();
|
||||
virtual ~FOOTPRINT_LIST_IMPL();
|
||||
virtual ~FOOTPRINT_LIST_IMPL() {};
|
||||
|
||||
void WriteCacheToFile( const wxString& aFilePath ) override;
|
||||
void ReadCacheFromFile( const wxString& aFilePath ) override;
|
||||
|
@ -94,16 +94,8 @@ public:
|
|||
PROGRESS_REPORTER* aProgressReporter = nullptr ) override;
|
||||
|
||||
protected:
|
||||
void startWorkers( FP_LIB_TABLE* aTable, const wxString* aNickname,
|
||||
FOOTPRINT_ASYNC_LOADER* aLoader, unsigned aNThreads ) override;
|
||||
bool joinWorkers() override;
|
||||
|
||||
void stopWorkers() override;
|
||||
|
||||
/**
|
||||
* Load footprints from m_queue_in.
|
||||
*/
|
||||
void loader_job();
|
||||
void loadLibs();
|
||||
void loadFootprints();
|
||||
|
||||
private:
|
||||
/**
|
||||
|
@ -113,11 +105,8 @@ private:
|
|||
*/
|
||||
bool CatchErrors( const std::function<void()>& aFunc );
|
||||
|
||||
FOOTPRINT_ASYNC_LOADER* m_loader;
|
||||
std::vector<std::thread> m_threads;
|
||||
SYNC_QUEUE<wxString> m_queue_in;
|
||||
SYNC_QUEUE<wxString> m_queue_out;
|
||||
std::atomic_size_t m_count_finished;
|
||||
long long m_list_timestamp;
|
||||
PROGRESS_REPORTER* m_progress_reporter;
|
||||
std::atomic_bool m_cancelled;
|
||||
|
|
|
@ -23,7 +23,6 @@
|
|||
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA
|
||||
*/
|
||||
|
||||
#include <thread>
|
||||
#include <future>
|
||||
#include <core/kicad_algo.h>
|
||||
#include <advanced_config.h>
|
||||
|
@ -48,6 +47,7 @@
|
|||
#include <geometry/geometry_utils.h>
|
||||
#include <confirm.h>
|
||||
#include <convert_to_biu.h>
|
||||
#include <thread_pool.h>
|
||||
#include <math/util.h> // for KiROUND
|
||||
#include "zone_filler.h"
|
||||
|
||||
|
@ -174,8 +174,6 @@ bool ZONE_FILLER::Fill( std::vector<ZONE*>& aZones, bool aCheck, wxWindow* aPare
|
|||
zone->UnFill();
|
||||
}
|
||||
|
||||
size_t cores = std::thread::hardware_concurrency();
|
||||
std::atomic<size_t> nextItem;
|
||||
|
||||
auto check_fill_dependency =
|
||||
[&]( ZONE* aZone, PCB_LAYER_ID aLayer, ZONE* aOtherZone ) -> bool
|
||||
|
@ -212,84 +210,75 @@ bool ZONE_FILLER::Fill( std::vector<ZONE*>& aZones, bool aCheck, wxWindow* aPare
|
|||
};
|
||||
|
||||
auto fill_lambda =
|
||||
[&]( PROGRESS_REPORTER* aReporter )
|
||||
[&]( std::pair<ZONE*, PCB_LAYER_ID> aFillItem ) -> int
|
||||
{
|
||||
size_t num = 0;
|
||||
PCB_LAYER_ID layer = aFillItem.second;
|
||||
ZONE* zone = aFillItem.first;
|
||||
bool canFill = true;
|
||||
|
||||
for( size_t i = nextItem++; i < toFill.size(); i = nextItem++ )
|
||||
// Check for any fill dependencies. If our zone needs to be clipped by
|
||||
// another zone then we can't fill until that zone is filled.
|
||||
for( ZONE* otherZone : aZones )
|
||||
{
|
||||
PCB_LAYER_ID layer = toFill[i].second;
|
||||
ZONE* zone = toFill[i].first;
|
||||
bool canFill = true;
|
||||
|
||||
// Check for any fill dependencies. If our zone needs to be clipped by
|
||||
// another zone then we can't fill until that zone is filled.
|
||||
for( ZONE* otherZone : aZones )
|
||||
{
|
||||
if( otherZone == zone )
|
||||
continue;
|
||||
|
||||
if( check_fill_dependency( zone, layer, otherZone ) )
|
||||
{
|
||||
canFill = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if( m_progressReporter && m_progressReporter->IsCancelled() )
|
||||
break;
|
||||
|
||||
if( !canFill )
|
||||
if( otherZone == zone )
|
||||
continue;
|
||||
|
||||
// Now we're ready to fill.
|
||||
SHAPE_POLY_SET fillPolys;
|
||||
fillSingleZone( zone, layer, fillPolys );
|
||||
if( check_fill_dependency( zone, layer, otherZone ) )
|
||||
{
|
||||
canFill = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
std::unique_lock<std::mutex> zoneLock( zone->GetLock() );
|
||||
if( m_progressReporter && m_progressReporter->IsCancelled() )
|
||||
return 0;
|
||||
|
||||
if( !canFill )
|
||||
return 0;
|
||||
|
||||
|
||||
// Now we're ready to fill.
|
||||
std::unique_lock<std::mutex> zoneLock( zone->GetLock(), std::try_to_lock );
|
||||
|
||||
if( zoneLock.owns_lock() )
|
||||
{
|
||||
SHAPE_POLY_SET fillPolys;
|
||||
if( !fillSingleZone( zone, layer, fillPolys ) )
|
||||
return 0;
|
||||
|
||||
zone->SetFilledPolysList( layer, fillPolys );
|
||||
zone->SetFillFlag( layer, true );
|
||||
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->AdvanceProgress();
|
||||
|
||||
num++;
|
||||
}
|
||||
|
||||
return num;
|
||||
return 0;
|
||||
};
|
||||
|
||||
// Calculate the copper fills (NB: this is multi-threaded)
|
||||
//
|
||||
while( !toFill.empty() )
|
||||
{
|
||||
size_t parallelThreadCount = std::min( cores, toFill.size() );
|
||||
std::vector<std::future<size_t>> returns( parallelThreadCount );
|
||||
std::vector<std::future<int>> returns;
|
||||
returns.reserve( toFill.size() );
|
||||
|
||||
nextItem = 0;
|
||||
thread_pool& tp = GetKiCadThreadPool();
|
||||
|
||||
if( parallelThreadCount <= 1 )
|
||||
for( auto& fillItem : toFill )
|
||||
returns.emplace_back( tp.submit( fill_lambda, fillItem ) );
|
||||
|
||||
for( auto& ret : returns )
|
||||
{
|
||||
fill_lambda( m_progressReporter );
|
||||
}
|
||||
else
|
||||
{
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
returns[ii] = std::async( std::launch::async, fill_lambda, m_progressReporter );
|
||||
std::future_status status;
|
||||
|
||||
for( size_t ii = 0; ii < parallelThreadCount; ++ii )
|
||||
do
|
||||
{
|
||||
// Here we balance returns with a 100ms timeout to allow UI updating
|
||||
std::future_status status;
|
||||
do
|
||||
{
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->KeepRefreshing();
|
||||
if( m_progressReporter )
|
||||
m_progressReporter->KeepRefreshing();
|
||||
|
||||
status = returns[ii].wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
status = ret.wait_for( std::chrono::milliseconds( 100 ) );
|
||||
} while( status != std::future_status::ready );
|
||||
}
|
||||
|
||||
alg::delete_if( toFill, [&]( const std::pair<ZONE*, PCB_LAYER_ID> pair ) -> bool
|
||||
|
|
|
@ -42,3 +42,4 @@ add_subdirectory( json_schema_validator )
|
|||
add_subdirectory( pegtl )
|
||||
add_subdirectory( 3dxware_sdk )
|
||||
add_subdirectory( turtle )
|
||||
add_subdirectory( thread-pool )
|
||||
|
|
|
@ -0,0 +1,7 @@
|
|||
add_library( thread-pool INTERFACE )
|
||||
|
||||
target_include_directories( thread-pool INTERFACE ${CMAKE_CURRENT_SOURCE_DIR} )
|
||||
|
||||
target_sources( thread-pool INTERFACE
|
||||
${CMAKE_CURRENT_SOURCE_DIR}/bs_thread_pool.hpp
|
||||
)
|
|
@ -0,0 +1,21 @@
|
|||
MIT License
|
||||
|
||||
Copyright (c) 2022 Barak Shoshany
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
|
@ -0,0 +1,516 @@
|
|||
#pragma once
|
||||
|
||||
/**
|
||||
* @file BS_thread_pool.hpp
|
||||
* @author Barak Shoshany (baraksh@gmail.com) (http://baraksh.com)
|
||||
* @version 3.0.0
|
||||
* @date 2022-05-30
|
||||
* @copyright Copyright (c) 2022 Barak Shoshany. Licensed under the MIT license. If you use this library in software of any kind, please provide a link to the GitHub repository https://github.com/bshoshany/thread-pool in the source code and documentation. If you use this library in published research, please cite it as follows: Barak Shoshany, "A C++17 Thread Pool for High-Performance Scientific Computing", doi:10.5281/zenodo.4742687, arXiv:2105.00613 (May 2021)
|
||||
*
|
||||
* @brief BS::thread_pool: a fast, lightweight, and easy-to-use C++17 thread pool library. This header file contains the entire library, including the main BS::thread_pool class and the helper classes BS::multi_future, BS:synced_stream, and BS::timer.
|
||||
*/
|
||||
|
||||
#define BS_THREAD_POOL_VERSION "v3.0.0 (2022-05-30)"
|
||||
|
||||
#include <atomic> // std::atomic
|
||||
#include <chrono> // std::chrono
|
||||
#include <condition_variable> // std::condition_variable
|
||||
#include <exception> // std::current_exception
|
||||
#include <functional> // std::function
|
||||
#include <future> // std::future, std::promise
|
||||
#include <iostream> // std::cout, std::ostream
|
||||
#include <memory> // std::make_shared, std::make_unique, std::shared_ptr, std::unique_ptr
|
||||
#include <mutex> // std::mutex, std::scoped_lock, std::unique_lock
|
||||
#include <queue> // std::queue
|
||||
#include <thread> // std::thread
|
||||
#include <type_traits> // std::common_type_t, std::decay_t, std::is_void_v, std::invoke_result_t
|
||||
#include <utility> // std::move, std::swap
|
||||
#include <vector> // std::vector
|
||||
|
||||
namespace BS
|
||||
{
|
||||
using concurrency_t = std::invoke_result_t<decltype(std::thread::hardware_concurrency)>;
|
||||
|
||||
// ============================================================================================= //
|
||||
// Begin class multi_future //
|
||||
|
||||
/**
|
||||
* @brief A helper class to facilitate waiting for and/or getting the results of multiple futures at once.
|
||||
*/
|
||||
template <typename T>
|
||||
class multi_future
|
||||
{
|
||||
public:
|
||||
/**
|
||||
* @brief Construct a multi_future object with the given number of futures.
|
||||
*
|
||||
* @param num_futures_ The desired number of futures to store.
|
||||
*/
|
||||
explicit multi_future(const size_t num_futures_ = 0) : f(num_futures_) {}
|
||||
|
||||
/**
|
||||
* @brief Get the results from all the futures stored in this multi_future object.
|
||||
*
|
||||
* @return A vector containing the results.
|
||||
*/
|
||||
std::vector<T> get()
|
||||
{
|
||||
std::vector<T> results(f.size());
|
||||
for (size_t i = 0; i < f.size(); ++i)
|
||||
results[i] = f[i].get();
|
||||
return results;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Wait for all the futures stored in this multi_future object.
|
||||
*/
|
||||
void wait() const
|
||||
{
|
||||
for (size_t i = 0; i < f.size(); ++i)
|
||||
f[i].wait();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief A vector to store the futures.
|
||||
*/
|
||||
std::vector<std::future<T>> f;
|
||||
};
|
||||
|
||||
// End class multi_future //
|
||||
// ============================================================================================= //
|
||||
|
||||
// ============================================================================================= //
|
||||
// Begin class thread_pool //
|
||||
|
||||
/**
|
||||
* @brief A fast, lightweight, and easy-to-use C++17 thread pool class.
|
||||
*/
|
||||
class thread_pool
|
||||
{
|
||||
public:
|
||||
// ============================
|
||||
// Constructors and destructors
|
||||
// ============================
|
||||
|
||||
/**
|
||||
* @brief Construct a new thread pool.
|
||||
*
|
||||
* @param thread_count_ The number of threads to use. The default value is the total number of hardware threads available, as reported by the implementation. This is usually determined by the number of cores in the CPU. If a core is hyperthreaded, it will count as two threads.
|
||||
*/
|
||||
explicit thread_pool(const concurrency_t thread_count_ = std::thread::hardware_concurrency()) : thread_count(thread_count_ ? thread_count_ : std::thread::hardware_concurrency()), threads(std::make_unique<std::thread[]>(thread_count_ ? thread_count_ : std::thread::hardware_concurrency()))
|
||||
{
|
||||
create_threads();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Destruct the thread pool. Waits for all tasks to complete, then destroys all threads. Note that if the variable paused is set to true, then any tasks still in the queue will never be executed.
|
||||
*/
|
||||
~thread_pool()
|
||||
{
|
||||
wait_for_tasks();
|
||||
destroy_threads();
|
||||
}
|
||||
|
||||
// =======================
|
||||
// Public member functions
|
||||
// =======================
|
||||
|
||||
/**
|
||||
* @brief Get the number of tasks currently waiting in the queue to be executed by the threads.
|
||||
*
|
||||
* @return The number of queued tasks.
|
||||
*/
|
||||
size_t get_tasks_queued() const
|
||||
{
|
||||
const std::scoped_lock tasks_lock(tasks_mutex);
|
||||
return tasks.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get the number of tasks currently being executed by the threads.
|
||||
*
|
||||
* @return The number of running tasks.
|
||||
*/
|
||||
size_t get_tasks_running() const
|
||||
{
|
||||
const std::scoped_lock tasks_lock(tasks_mutex);
|
||||
return tasks_total - tasks.size();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get the total number of unfinished tasks: either still in the queue, or running in a thread. Note that get_tasks_total() == get_tasks_queued() + get_tasks_running().
|
||||
*
|
||||
* @return The total number of tasks.
|
||||
*/
|
||||
size_t get_tasks_total() const
|
||||
{
|
||||
return tasks_total;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Get the number of threads in the pool.
|
||||
*
|
||||
* @return The number of threads.
|
||||
*/
|
||||
concurrency_t get_thread_count() const
|
||||
{
|
||||
return thread_count;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Parallelize a loop by automatically splitting it into blocks and submitting each block separately to the queue.
|
||||
*
|
||||
* @tparam F The type of the function to loop through.
|
||||
* @tparam T1 The type of the first index in the loop. Should be a signed or unsigned integer.
|
||||
* @tparam T2 The type of the index after the last index in the loop. Should be a signed or unsigned integer. If T1 is not the same as T2, a common type will be automatically inferred.
|
||||
* @tparam T The common type of T1 and T2.
|
||||
* @tparam R The return value of the loop function F (can be void).
|
||||
* @param first_index The first index in the loop.
|
||||
* @param index_after_last The index after the last index in the loop. The loop will iterate from first_index to (index_after_last - 1) inclusive. In other words, it will be equivalent to "for (T i = first_index; i < index_after_last; ++i)". Note that if first_index == index_after_last, no blocks will be submitted.
|
||||
* @param loop The function to loop through. Will be called once per block. Should take exactly two arguments: the first index in the block and the index after the last index in the block. loop(start, end) should typically involve a loop of the form "for (T i = start; i < end; ++i)".
|
||||
* @param num_blocks The maximum number of blocks to split the loop into. The default is to use the number of threads in the pool.
|
||||
* @return A multi_future object that can be used to wait for all the blocks to finish. If the loop function returns a value, the multi_future object can be used to obtain the values returned by each block.
|
||||
*/
|
||||
template <typename F, typename T1, typename T2, typename T = std::common_type_t<T1, T2>, typename R = std::invoke_result_t<std::decay_t<F>, T, T>>
|
||||
multi_future<R> parallelize_loop(const T1& first_index, const T2& index_after_last, const F& loop, size_t num_blocks = 0)
|
||||
{
|
||||
T first_index_T = static_cast<T>(first_index);
|
||||
T index_after_last_T = static_cast<T>(index_after_last);
|
||||
if (first_index_T == index_after_last_T)
|
||||
return multi_future<R>();
|
||||
if (index_after_last_T < first_index_T)
|
||||
std::swap(index_after_last_T, first_index_T);
|
||||
if (num_blocks == 0)
|
||||
num_blocks = thread_count;
|
||||
const size_t total_size = static_cast<size_t>(index_after_last_T - first_index_T);
|
||||
size_t block_size = static_cast<size_t>(total_size / num_blocks);
|
||||
if (block_size == 0)
|
||||
{
|
||||
block_size = 1;
|
||||
num_blocks = total_size > 1 ? total_size : 1;
|
||||
}
|
||||
multi_future<R> mf(num_blocks);
|
||||
for (size_t i = 0; i < num_blocks; ++i)
|
||||
{
|
||||
const T start = (static_cast<T>(i * block_size) + first_index_T);
|
||||
const T end = (i == num_blocks - 1) ? index_after_last_T : (static_cast<T>((i + 1) * block_size) + first_index_T);
|
||||
mf.f[i] = submit(loop, start, end);
|
||||
}
|
||||
return mf;
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Push a function with zero or more arguments, but no return value, into the task queue.
|
||||
*
|
||||
* @tparam F The type of the function.
|
||||
* @tparam A The types of the arguments.
|
||||
* @param task The function to push.
|
||||
* @param args The arguments to pass to the function.
|
||||
*/
|
||||
template <typename F, typename... A>
|
||||
void push_task(const F& task, const A&... args)
|
||||
{
|
||||
{
|
||||
const std::scoped_lock tasks_lock(tasks_mutex);
|
||||
if constexpr (sizeof...(args) == 0)
|
||||
tasks.push(std::function<void()>(task));
|
||||
else
|
||||
tasks.push(std::function<void()>([task, args...] { task(args...); }));
|
||||
}
|
||||
++tasks_total;
|
||||
task_available_cv.notify_one();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Reset the number of threads in the pool. Waits for all currently running tasks to be completed, then destroys all threads in the pool and creates a new thread pool with the new number of threads. Any tasks that were waiting in the queue before the pool was reset will then be executed by the new threads. If the pool was paused before resetting it, the new pool will be paused as well.
|
||||
*
|
||||
* @param thread_count_ The number of threads to use. The default value is the total number of hardware threads available, as reported by the implementation. This is usually determined by the number of cores in the CPU. If a core is hyperthreaded, it will count as two threads.
|
||||
*/
|
||||
void reset(const concurrency_t thread_count_ = std::thread::hardware_concurrency())
|
||||
{
|
||||
const bool was_paused = paused;
|
||||
paused = true;
|
||||
wait_for_tasks();
|
||||
destroy_threads();
|
||||
thread_count = thread_count_ ? thread_count_ : std::thread::hardware_concurrency();
|
||||
threads = std::make_unique<std::thread[]>(thread_count);
|
||||
paused = was_paused;
|
||||
create_threads();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Submit a function with zero or more arguments into the task queue. If the function has a return value, get a future for the eventual returned value. If the function has no return value, get an std::future<void> which can be used to wait until the task finishes.
|
||||
*
|
||||
* @tparam F The type of the function.
|
||||
* @tparam A The types of the zero or more arguments to pass to the function.
|
||||
* @tparam R The return type of the function (can be void).
|
||||
* @param task The function to submit.
|
||||
* @param args The zero or more arguments to pass to the function.
|
||||
* @return A future to be used later to wait for the function to finish executing and/or obtain its returned value if it has one.
|
||||
*/
|
||||
template <typename F, typename... A, typename R = std::invoke_result_t<std::decay_t<F>, std::decay_t<A>...>>
|
||||
std::future<R> submit(const F& task, const A&... args)
|
||||
{
|
||||
std::shared_ptr<std::promise<R>> task_promise = std::make_shared<std::promise<R>>();
|
||||
push_task(
|
||||
[task, args..., task_promise]
|
||||
{
|
||||
try
|
||||
{
|
||||
if constexpr (std::is_void_v<R>)
|
||||
{
|
||||
task(args...);
|
||||
task_promise->set_value();
|
||||
}
|
||||
else
|
||||
{
|
||||
task_promise->set_value(task(args...));
|
||||
}
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
try
|
||||
{
|
||||
task_promise->set_exception(std::current_exception());
|
||||
}
|
||||
catch (...)
|
||||
{
|
||||
}
|
||||
}
|
||||
});
|
||||
return task_promise->get_future();
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Wait for tasks to be completed. Normally, this function waits for all tasks, both those that are currently running in the threads and those that are still waiting in the queue. However, if the pool is paused, this function only waits for the currently running tasks (otherwise it would wait forever). Note: To wait for just one specific task, use submit() instead, and call the wait() member function of the generated future.
|
||||
*/
|
||||
void wait_for_tasks()
|
||||
{
|
||||
waiting = true;
|
||||
std::unique_lock<std::mutex> tasks_lock(tasks_mutex);
|
||||
task_done_cv.wait(tasks_lock, [this] { return (tasks_total == (paused ? tasks.size() : 0)); });
|
||||
waiting = false;
|
||||
}
|
||||
|
||||
// ===========
// Public data
// ===========

/**
 * @brief An atomic flag telling the workers to pause. While true, workers stop
 * retrieving new tasks from the queue; tasks that are already executing keep running
 * until they finish. Set back to false to resume retrieving tasks.
 * NOTE(review): workers are only notified via task_available_cv, so setting this back
 * to false does not by itself wake a sleeping worker — confirm intended semantics.
 */
std::atomic<bool> paused = false;

private:
// ========================
// Private member functions
// ========================
/**
|
||||
* @brief Create the threads in the pool and assign a worker to each thread.
|
||||
*/
|
||||
void create_threads()
|
||||
{
|
||||
running = true;
|
||||
for (concurrency_t i = 0; i < thread_count; ++i)
|
||||
{
|
||||
threads[i] = std::thread(&thread_pool::worker, this);
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief Destroy the threads in the pool.
|
||||
*/
|
||||
void destroy_threads()
|
||||
{
|
||||
running = false;
|
||||
task_available_cv.notify_all();
|
||||
for (concurrency_t i = 0; i < thread_count; ++i)
|
||||
{
|
||||
threads[i].join();
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* @brief A worker function to be assigned to each thread in the pool. Waits until it is notified by push_task() that a task is available, and then retrieves the task from the queue and executes it. Once the task finishes, the worker notifies wait_for_tasks() in case it is waiting.
|
||||
*/
|
||||
void worker()
|
||||
{
|
||||
while (running)
|
||||
{
|
||||
std::function<void()> task;
|
||||
std::unique_lock<std::mutex> tasks_lock(tasks_mutex);
|
||||
task_available_cv.wait(tasks_lock, [&] { return !tasks.empty() || !running; });
|
||||
if (running && !paused)
|
||||
{
|
||||
task = std::move(tasks.front());
|
||||
tasks.pop();
|
||||
tasks_lock.unlock();
|
||||
task();
|
||||
--tasks_total;
|
||||
if (waiting)
|
||||
task_done_cv.notify_one();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ============
// Private data
// ============

/**
 * @brief An atomic flag telling the workers to keep running. When set to false, the
 * workers permanently stop working.
 */
std::atomic<bool> running = false;

/**
 * @brief A condition variable that worker() waits on; signaled when a new task becomes
 * available (and by destroy_threads() on shutdown).
 */
std::condition_variable task_available_cv = {};

/**
 * @brief A condition variable that wait_for_tasks() waits on; signaled by a worker when
 * a task is done.
 */
std::condition_variable task_done_cv = {};

/**
 * @brief The queue of tasks awaiting execution, guarded by tasks_mutex.
 */
std::queue<std::function<void()>> tasks = {};

/**
 * @brief An atomic count of unfinished tasks: those still in the queue plus those
 * currently running in a thread.
 */
std::atomic<size_t> tasks_total = 0;

/**
 * @brief A mutex synchronizing access to the task queue by different threads.
 */
mutable std::mutex tasks_mutex = {};

/**
 * @brief The number of threads in the pool.
 */
concurrency_t thread_count = 0;

/**
 * @brief A smart pointer owning the memory allocated for the worker threads.
 */
std::unique_ptr<std::thread[]> threads = nullptr;

/**
 * @brief An atomic flag indicating that wait_for_tasks() is active and expects
 * task_done_cv notifications whenever a task is done.
 */
std::atomic<bool> waiting = false;
};
|
||||
|
||||
// End class thread_pool //
|
||||
// ============================================================================================= //
|
||||
|
||||
// ============================================================================================= //
|
||||
// Begin class synced_stream //
|
||||
|
||||
/**
 * @brief A helper class that serializes printing to an output stream from multiple
 * threads.
 */
class synced_stream
{
public:
    /**
     * @brief Construct a new synced stream.
     *
     * @param out_stream_ The output stream to print to. Defaults to std::cout.
     */
    explicit synced_stream(std::ostream& out_stream_ = std::cout) : dest(out_stream_) {}

    /**
     * @brief Print any number of items to the output stream. No other thread can print
     * to the stream at the same time, provided every thread prints exclusively through
     * the same synced_stream object.
     *
     * @tparam T The types of the items.
     * @param items The items to print.
     */
    template <typename... T>
    void print(const T&... items)
    {
        const std::lock_guard<std::mutex> guard(write_mutex);
        (dest << ... << items);
    }

    /**
     * @brief Print any number of items to the output stream followed by a newline
     * character, with the same mutual exclusion guarantee as print().
     *
     * @tparam T The types of the items.
     * @param items The items to print.
     */
    template <typename... T>
    void println(const T&... items)
    {
        print(items..., '\n');
    }

private:
    /**
     * @brief The output stream to print to.
     */
    std::ostream& dest;

    /**
     * @brief A mutex serializing the printing.
     */
    mutable std::mutex write_mutex = {};
};
// End class synced_stream //
|
||||
// ============================================================================================= //
|
||||
|
||||
// ============================================================================================= //
|
||||
// Begin class timer //
|
||||
|
||||
/**
 * @brief A helper class for measuring execution time, intended for benchmarking.
 */
class timer
{
public:
    /**
     * @brief Start (or restart) the measurement.
     */
    void start()
    {
        begin = std::chrono::steady_clock::now();
    }

    /**
     * @brief Stop the measurement and record the time elapsed since start().
     */
    void stop()
    {
        span = std::chrono::steady_clock::now() - begin;
    }

    /**
     * @brief Get the number of milliseconds that elapsed between start() and stop().
     *
     * @return The elapsed milliseconds.
     */
    std::chrono::milliseconds::rep ms() const
    {
        return std::chrono::duration_cast<std::chrono::milliseconds>(span).count();
    }

private:
    /**
     * @brief The time point at which measuring started.
     */
    std::chrono::time_point<std::chrono::steady_clock> begin = std::chrono::steady_clock::now();

    /**
     * @brief The duration recorded between start() and stop(); zero until stop() runs.
     */
    std::chrono::duration<double> span = std::chrono::duration<double>::zero();
};
||||
// End class timer //
|
||||
// ============================================================================================= //
|
||||
|
||||
} // namespace BS
|
Loading…
Reference in New Issue