Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
governor.cpp
/*
    Copyright (c) 2005-2020 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#include <stdio.h>
#include <stdlib.h>
#include "governor.h"
#include "tbb_main.h"
#include "scheduler.h"
#include "market.h"
#include "arena.h"

#include "tbb/task_scheduler_init.h"

#include "dynamic_link.h"

namespace tbb {
namespace internal {

//------------------------------------------------------------------------
// governor
//------------------------------------------------------------------------

#if __TBB_SURVIVE_THREAD_SWITCH
// Support for interoperability with Intel(R) Cilk(TM) Plus.

#if _WIN32
#define CILKLIB_NAME "cilkrts20.dll"
#else
#define CILKLIB_NAME "libcilkrts.so"
#endif

static __cilk_tbb_retcode (*watch_stack_handler)(struct __cilk_tbb_unwatch_thunk* u,
                                                 struct __cilk_tbb_stack_op_thunk o);

// Table describing how to link the handlers.
static const dynamic_link_descriptor CilkLinkTable[] = {
    DLD_NOWEAK(__cilkrts_watch_stack, watch_stack_handler)
};

static atomic<do_once_state> cilkrts_load_state;

bool initialize_cilk_interop() {
    // Pinning can fail. This is a normal situation, and means that the current
    // thread does not use cilkrts and consequently does not need interop.
    return dynamic_link( CILKLIB_NAME, CilkLinkTable, 1, /*handle=*/0, DYNAMIC_LINK_GLOBAL );
}
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
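
// A minimal standalone sketch of the same idiom — resolve an optional runtime's
// entry point once, and treat a failed load as "no interop needed" — using POSIX
// dlopen/dlsym and std::call_once in place of TBB's dynamic_link/atomic_do_once.
// The library and symbol names below are hypothetical, for illustration only.
#include <dlfcn.h>   // dlopen, dlsym (POSIX)
#include <mutex>     // std::call_once
#include <cstdio>

static int (*demo_watch_fn)(void*);   // stays NULL when the runtime is absent
static std::once_flag demo_load_flag;

static void demo_try_load() {
    // RTLD_GLOBAL mirrors DYNAMIC_LINK_GLOBAL above; failure is a normal outcome.
    if (void* lib = dlopen("libexample_rt.so", RTLD_NOW | RTLD_GLOBAL))
        demo_watch_fn = (int (*)(void*))dlsym(lib, "example_watch_stack");
}

int main() {
    std::call_once(demo_load_flag, demo_try_load);
    std::puts(demo_watch_fn ? "interop enabled" : "interop not available");
}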

namespace rml {
    tbb_server* make_private_server( tbb_client& client );
}

void governor::acquire_resources () {
#if USE_PTHREAD
    int status = theTLS.create(auto_terminate);
#else
    int status = theTLS.create();
#endif
    if( status )
        handle_perror(status, "TBB failed to initialize task scheduler TLS\n");
    is_speculation_enabled = cpu_has_speculation();
    is_rethrow_broken = gcc_rethrow_exception_broken();
}

void governor::release_resources () {
    theRMLServerFactory.close();
    destroy_process_mask();
#if TBB_USE_ASSERT
    if( __TBB_InitOnce::initialization_done() && local_scheduler_if_initialized() )
        runtime_warning( "TBB is unloaded while tbb::task_scheduler_init object is alive?" );
#endif
    int status = theTLS.destroy();
    if( status )
        runtime_warning("failed to destroy task scheduler TLS: %s", strerror(status));
    dynamic_unlink_all();
}

rml::tbb_server* governor::create_rml_server ( rml::tbb_client& client ) {
    rml::tbb_server* server = NULL;
    if( !UsePrivateRML ) {
        ::rml::factory::status_type status = theRMLServerFactory.make_server( server, client );
        if( status != ::rml::factory::st_success ) {
            UsePrivateRML = true;
            runtime_warning( "rml::tbb_factory::make_server failed with status %x, falling back on private rml", status );
        }
    }
    if ( !server ) {
        __TBB_ASSERT( UsePrivateRML, NULL );
        server = rml::make_private_server( client );
    }
    __TBB_ASSERT( server, "Failed to create RML server" );
    return server;
}

uintptr_t governor::tls_value_of( generic_scheduler* s ) {
    __TBB_ASSERT( (uintptr_t(s)&1) == 0, "Bad pointer to the scheduler" );
    // LSB marks the scheduler initialized with arena
    return uintptr_t(s) | uintptr_t((s && (s->my_arena || s->is_worker()))? 1 : 0);
}

void governor::assume_scheduler( generic_scheduler* s ) {
    theTLS.set( tls_value_of(s) );
}

bool governor::is_set( generic_scheduler* s ) {
    return theTLS.get() == tls_value_of(s);
}
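
// The three functions above implement a tagged pointer: generic_scheduler objects
// are at least 2-byte aligned, so bit 0 of the TLS word is free to record "fully
// initialized with an arena (or a worker)". A minimal standalone sketch of the
// encode/decode arithmetic (sched and the names below are hypothetical):
#include <cassert>
#include <cstdint>

struct sched { int payload; };   // stand-in for generic_scheduler

static std::uintptr_t pack(sched* p, bool has_arena) {
    assert((std::uintptr_t(p) & 1) == 0);            // alignment keeps bit 0 free
    return std::uintptr_t(p) | std::uintptr_t(p && has_arena ? 1 : 0);
}

static sched* unpack(std::uintptr_t v) {
    return (sched*)(v & ~std::uintptr_t(1));         // mask the tag bit off
}

int main() {
    sched s;
    std::uintptr_t v = pack(&s, /*has_arena=*/true);
    assert(unpack(v) == &s && (v & 1) == 1);         // pointer and flag both survive
}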

void governor::sign_on( generic_scheduler* s ) {
    __TBB_ASSERT( is_set(NULL) && s, NULL );
    assume_scheduler( s );
#if __TBB_SURVIVE_THREAD_SWITCH
    if( watch_stack_handler ) {
        __cilk_tbb_stack_op_thunk o;
        o.routine = &stack_op_handler;
        o.data = s;
        if( (*watch_stack_handler)(&s->my_cilk_unwatch_thunk, o) ) {
            // Failed to register with cilkrts, make sure we are clean
            s->my_cilk_unwatch_thunk.routine = NULL;
        }
#if TBB_USE_ASSERT
        else
            s->my_cilk_state = generic_scheduler::cs_running;
#endif /* TBB_USE_ASSERT */
    }
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
    __TBB_ASSERT( is_set(s), NULL );
}

void governor::sign_off( generic_scheduler* s ) {
    suppress_unused_warning( s );
    __TBB_ASSERT( is_set(s), "attempt to unregister a wrong scheduler instance" );
    assume_scheduler(NULL);
#if __TBB_SURVIVE_THREAD_SWITCH
    __cilk_tbb_unwatch_thunk &ut = s->my_cilk_unwatch_thunk;
    if ( ut.routine )
        (*ut.routine)(ut.data);
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
}

void governor::one_time_init() {
    if( !__TBB_InitOnce::initialization_done() )
        DoOneTimeInitializations();
#if __TBB_SURVIVE_THREAD_SWITCH
    atomic_do_once( &initialize_cilk_interop, cilkrts_load_state );
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
}
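
// atomic_do_once above is a compare-and-swap gate that runs its initializer at
// most once per process. A simplified standalone analogue (TBB's real helper in
// tbb_misc.h also copes with initializers that must be retried):
#include <atomic>
#include <cstdio>

enum demo_once_state { demo_uninitialized, demo_in_progress, demo_done };
static std::atomic<demo_once_state> demo_state(demo_uninitialized);

template <typename F>
void demo_do_once(F f, std::atomic<demo_once_state>& state) {
    demo_once_state expected = demo_uninitialized;
    if (state.compare_exchange_strong(expected, demo_in_progress)) {
        f();                                              // this thread won the race
        state.store(demo_done, std::memory_order_release);
    } else {
        while (state.load(std::memory_order_acquire) != demo_done) {}  // spin-wait
    }
}

int main() {
    demo_do_once([]{ std::puts("runs exactly once"); }, demo_state);
    demo_do_once([]{ std::puts("never printed"); },     demo_state);
}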

generic_scheduler* governor::init_scheduler_weak() {
    one_time_init();
    __TBB_ASSERT( is_set(NULL), "TLS contains a scheduler?" );
    generic_scheduler* s = generic_scheduler::create_master( NULL ); // without arena
    s->my_auto_initialized = true;
    return s;
}

generic_scheduler* governor::init_scheduler( int num_threads, stack_size_type stack_size, bool auto_init ) {
    one_time_init();
    if ( uintptr_t v = theTLS.get() ) {
        generic_scheduler* s = tls_scheduler_of( v );
        if ( (v&1) == 0 ) { // TLS holds scheduler instance without arena
            __TBB_ASSERT( s->my_ref_count == 1, "weakly initialized scheduler must have refcount equal to 1" );
            __TBB_ASSERT( !s->my_arena, "weakly initialized scheduler must have no arena" );
            __TBB_ASSERT( s->my_auto_initialized, "weakly initialized scheduler is supposed to be auto-initialized" );
            s->attach_arena( market::create_arena( default_num_threads(), 1, 0 ), 0, /*is_master*/true );
            __TBB_ASSERT( s->my_arena_index == 0, "Master thread must occupy the first slot in its arena" );
            s->my_arena_slot->my_scheduler = s;
#if __TBB_TASK_GROUP_CONTEXT
            s->my_arena->my_default_ctx = s->default_context(); // it also transfers implied ownership
#endif
            // Mark the scheduler as fully initialized
            assume_scheduler( s );
        }
        // Increment refcount only for explicit instances of task_scheduler_init.
        if ( !auto_init ) s->my_ref_count += 1;
        __TBB_ASSERT( s->my_arena, "scheduler is not initialized fully" );
        return s;
    }
    // Create new scheduler instance with arena
    if( num_threads == task_scheduler_init::automatic )
        num_threads = default_num_threads();
    arena *a = market::create_arena( num_threads, 1, stack_size );
    generic_scheduler* s = generic_scheduler::create_master( a );
    __TBB_ASSERT(s, "Somehow a local scheduler creation for a master thread failed");
    __TBB_ASSERT( is_set(s), NULL );
    s->my_auto_initialized = auto_init;
    return s;
}

bool governor::terminate_scheduler( generic_scheduler* s, bool blocking ) {
    bool ok = false;
    __TBB_ASSERT( is_set(s), "Attempt to terminate non-local scheduler instance" );
    if (0 == --(s->my_ref_count)) {
        ok = s->cleanup_master( blocking );
        __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
    }
    return ok;
}
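
// Taken together, init_scheduler and terminate_scheduler give task_scheduler_init
// reference-count semantics per master thread: nested explicit initializations in
// one thread reuse a single scheduler, and cleanup runs only when the count hits
// zero. A usage sketch with the public API (behavior as described above):
#include "tbb/task_scheduler_init.h"

void nested_init_demo() {
    tbb::task_scheduler_init outer;        // creates the scheduler, my_ref_count == 1
    {
        tbb::task_scheduler_init inner;    // same thread: reuses it, my_ref_count == 2
    }                                      // inner destroyed: back to 1
}                                          // outer destroyed: 0, cleanup_master runs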

void governor::auto_terminate(void* arg){
    generic_scheduler* s = tls_scheduler_of( uintptr_t(arg) ); // arg is equivalent to theTLS.get()
    if( s && s->my_auto_initialized ) {
        if( !--(s->my_ref_count) ) {
            // If the TLS slot is already cleared by OS or underlying concurrency
            // runtime, restore its value.
            if( !is_set(s) )
                assume_scheduler(s);
            s->cleanup_master( /*blocking_terminate=*/false );
            __TBB_ASSERT( is_set(NULL), "cleanup_master has not cleared its TLS slot" );
        }
    }
}
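
// auto_terminate relies on the TLS destructor registered by acquire_resources:
// theTLS.create(auto_terminate) arranges for the routine to run at thread exit
// with the slot's last value as its argument. A standalone sketch of that POSIX
// mechanism (the names here are illustrative, not TBB's):
#include <pthread.h>
#include <cstdio>

static pthread_key_t demo_key;

// Called by pthreads at thread exit for any thread that left a non-NULL value.
static void demo_dtor(void* value) { std::printf("cleanup of %p\n", value); }

static void* demo_thread(void*) {
    pthread_setspecific(demo_key, (void*)0x1);   // placeholder "scheduler" value
    return NULL;                                 // demo_dtor fires after return
}

int main() {
    pthread_key_create(&demo_key, demo_dtor);
    pthread_t t;
    pthread_create(&t, NULL, demo_thread, NULL);
    pthread_join(t, NULL);
    pthread_key_delete(&demo_key);
}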

void governor::print_version_info () {
    if ( UsePrivateRML )
        PrintExtraVersionInfo( "RML", "private" );
    else {
        PrintExtraVersionInfo( "RML", "shared" );
        theRMLServerFactory.call_with_server_info( PrintRMLVersionInfo, (void*)"" );
    }
#if __TBB_SURVIVE_THREAD_SWITCH
    if( watch_stack_handler )
        PrintExtraVersionInfo( "CILK", CILKLIB_NAME );
#endif /* __TBB_SURVIVE_THREAD_SWITCH */
}

void governor::initialize_rml_factory () {
    ::rml::factory::status_type res = theRMLServerFactory.open();
    UsePrivateRML = res != ::rml::factory::st_success;
}

#if __TBB_SURVIVE_THREAD_SWITCH
__cilk_tbb_retcode governor::stack_op_handler( __cilk_tbb_stack_op op, void* data ) {
    __TBB_ASSERT(data,NULL);
    generic_scheduler* s = static_cast<generic_scheduler*>(data);
#if TBB_USE_ASSERT
    void* current = local_scheduler_if_initialized();
#if _WIN32||_WIN64
    uintptr_t thread_id = GetCurrentThreadId();
#else
    uintptr_t thread_id = uintptr_t(pthread_self());
#endif
#endif /* TBB_USE_ASSERT */
    switch( op ) {
    case CILK_TBB_STACK_ADOPT: {
        __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
                      current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid adoption" );
#if TBB_USE_ASSERT
        if( current==s )
            runtime_warning( "redundant adoption of %p by thread %p\n", s, (void*)thread_id );
        s->my_cilk_state = generic_scheduler::cs_running;
#endif /* TBB_USE_ASSERT */
        assume_scheduler( s );
        break;
    }
    case CILK_TBB_STACK_ORPHAN: {
        __TBB_ASSERT( current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid orphaning" );
#if TBB_USE_ASSERT
        s->my_cilk_state = generic_scheduler::cs_limbo;
#endif /* TBB_USE_ASSERT */
        assume_scheduler(NULL);
        break;
    }
    case CILK_TBB_STACK_RELEASE: {
        __TBB_ASSERT( !current && s->my_cilk_state==generic_scheduler::cs_limbo ||
                      current==s && s->my_cilk_state==generic_scheduler::cs_running, "invalid release" );
#if TBB_USE_ASSERT
        s->my_cilk_state = generic_scheduler::cs_freed;
#endif /* TBB_USE_ASSERT */
        s->my_cilk_unwatch_thunk.routine = NULL;
        auto_terminate( s );
        break;
    }
    default:
        __TBB_ASSERT(0, "invalid op");
    }
    return 0;
}
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

#if __TBB_NUMA_SUPPORT

#if __TBB_WEAK_SYMBOLS_PRESENT
#pragma weak initialize_numa_topology
#pragma weak allocate_binding_handler
#pragma weak deallocate_binding_handler
#pragma weak bind_to_node
#pragma weak restore_affinity

extern "C" {
void initialize_numa_topology(
    size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list );

binding_handler* allocate_binding_handler( int slot_num );
void deallocate_binding_handler( binding_handler* handler_ptr );

void bind_to_node( binding_handler* handler_ptr, int slot_num, int numa_id );
void restore_affinity( binding_handler* handler_ptr, int slot_num );
}
#endif /* __TBB_WEAK_SYMBOLS_PRESENT */

// Handlers for communication with TBBbind
#if _WIN32 || _WIN64 || __linux__
static void (*initialize_numa_topology_ptr)(
    size_t groups_num, int& nodes_count, int*& indexes_list, int*& concurrency_list ) = NULL;
#endif /* _WIN32 || _WIN64 || __linux__ */

static binding_handler* (*allocate_binding_handler_ptr)( int slot_num ) = NULL;
static void (*deallocate_binding_handler_ptr)( binding_handler* handler_ptr ) = NULL;

static void (*bind_to_node_ptr)( binding_handler* handler_ptr, int slot_num, int numa_id ) = NULL;
static void (*restore_affinity_ptr)( binding_handler* handler_ptr, int slot_num ) = NULL;

#if _WIN32 || _WIN64 || __linux__
// Table describing how to link the handlers.
static const dynamic_link_descriptor TbbBindLinkTable[] = {
    DLD(initialize_numa_topology, initialize_numa_topology_ptr),
    DLD(allocate_binding_handler, allocate_binding_handler_ptr),
    DLD(deallocate_binding_handler, deallocate_binding_handler_ptr),
    DLD(bind_to_node, bind_to_node_ptr),
    DLD(restore_affinity, restore_affinity_ptr)
};

static const unsigned LinkTableSize = 5;

#if TBB_USE_DEBUG
#define DEBUG_SUFFIX "_debug"
#else
#define DEBUG_SUFFIX
#endif /* TBB_USE_DEBUG */

#if _WIN32 || _WIN64
#define TBBBIND_NAME "tbbbind" DEBUG_SUFFIX ".dll"
#elif __linux__
#define TBBBIND_NAME "libtbbbind" DEBUG_SUFFIX __TBB_STRING(.so.TBB_COMPATIBLE_INTERFACE_VERSION)
#endif /* __linux__ */
#endif /* _WIN32 || _WIN64 || __linux__ */

// Stubs that will be used if TBBbind library is unavailable.
static binding_handler* dummy_allocate_binding_handler ( int ) { return NULL; }
static void dummy_deallocate_binding_handler ( binding_handler* ) { }
static void dummy_bind_to_node ( binding_handler*, int, int ) { }
static void dummy_restore_affinity ( binding_handler*, int ) { }

// Representation of NUMA topology information on the TBB side.
// The NUMA topology may be initialized by a third-party component (e.g. hwloc)
// or just filled in by the default stubs (a single NUMA node with index -1 and
// the default_num_threads value as its default_concurrency).
namespace numa_topology {
namespace {
int numa_nodes_count = 0;
int* numa_indexes = NULL;
int* default_concurrency_list = NULL;
static tbb::atomic<do_once_state> numa_topology_init_state;
} // unnamed namespace

// Tries to load the TBBbind library API; if that succeeds, gets the NUMA
// topology information from it, otherwise fills the NUMA topology in with stubs.
// TODO: Add TBBbind loading status if TBB_VERSION is set.
void initialization_impl() {

#if _WIN32 || _WIN64 || __linux__
    bool load_tbbbind = true;
#if _WIN32 && !_WIN64
    // For 32-bit Windows applications, process affinity masks can only support up to 32 logical CPUs.
    SYSTEM_INFO si;
    GetNativeSystemInfo(&si);
    load_tbbbind = si.dwNumberOfProcessors <= 32;
#endif /* _WIN32 && !_WIN64 */

    if (load_tbbbind && dynamic_link(TBBBIND_NAME, TbbBindLinkTable, LinkTableSize)) {
        int number_of_groups = 1;
#if _WIN32 || _WIN64
        number_of_groups = NumberOfProcessorGroups();
#endif /* _WIN32 || _WIN64 */
        initialize_numa_topology_ptr(
            number_of_groups, numa_nodes_count, numa_indexes, default_concurrency_list);

        if (numa_nodes_count==1 && numa_indexes[0] >= 0) {
            __TBB_ASSERT(default_concurrency_list[numa_indexes[0]] == (int)governor::default_num_threads(),
                "default_concurrency() should be equal to governor::default_num_threads() on single "
                "NUMA node systems.");
        }
        return;
    }
#endif /* _WIN32 || _WIN64 || __linux__ */

    static int dummy_index = -1;
    static int dummy_concurrency = governor::default_num_threads();

    numa_nodes_count = 1;
    numa_indexes = &dummy_index;
    default_concurrency_list = &dummy_concurrency;

    allocate_binding_handler_ptr = dummy_allocate_binding_handler;
    deallocate_binding_handler_ptr = dummy_deallocate_binding_handler;

    bind_to_node_ptr = dummy_bind_to_node;
    restore_affinity_ptr = dummy_restore_affinity;
}

void initialize() {
    atomic_do_once(initialization_impl, numa_topology_init_state);
}

unsigned nodes_count() {
    initialize();
    return numa_nodes_count;
}

void fill( int* indexes_array ) {
    initialize();
    for ( int i = 0; i < numa_nodes_count; i++ ) {
        indexes_array[i] = numa_indexes[i];
    }
}

int default_concurrency( int node_id ) {
    if (node_id >= 0) {
        initialize();
        return default_concurrency_list[node_id];
    }
    return governor::default_num_threads();
}

} // namespace numa_topology
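
// Inside the library, the namespace above is consumed roughly as follows: query
// the node count, fetch the node ids, then look up per-node concurrency. With
// the stub topology this yields one node with id -1, whose lookup falls back to
// governor::default_num_threads(). An illustrative sketch only (this is an
// internal API, and numa_topology_demo is a hypothetical helper):
#include <vector>

void numa_topology_demo() {
    unsigned n = tbb::internal::numa_topology::nodes_count();   // triggers lazy init
    std::vector<int> ids(n);
    tbb::internal::numa_topology::fill(ids.data());
    for (unsigned i = 0; i < n; ++i) {
        int c = tbb::internal::numa_topology::default_concurrency(ids[i]);
        (void)c;   // per-node concurrency hint, e.g. for arena sizing
    }
}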

binding_handler* construct_binding_handler(int slot_num) {
    __TBB_ASSERT(allocate_binding_handler_ptr, "tbbbind loading was not performed");
    return allocate_binding_handler_ptr(slot_num);
}

void destroy_binding_handler(binding_handler* handler_ptr) {
    __TBB_ASSERT(deallocate_binding_handler_ptr, "tbbbind loading was not performed");
    deallocate_binding_handler_ptr(handler_ptr);
}

void bind_thread_to_node(binding_handler* handler_ptr, int slot_num, int numa_id) {
    __TBB_ASSERT(slot_num >= 0, "Negative thread index");
    __TBB_ASSERT(bind_to_node_ptr, "tbbbind loading was not performed");
    bind_to_node_ptr(handler_ptr, slot_num, numa_id);
}

void restore_affinity_mask(binding_handler* handler_ptr, int slot_num) {
    __TBB_ASSERT(slot_num >= 0, "Negative thread index");
    __TBB_ASSERT(restore_affinity_ptr, "tbbbind loading was not performed");
    restore_affinity_ptr(handler_ptr, slot_num);
}

#endif /* __TBB_NUMA_SUPPORT */

} // namespace internal

//------------------------------------------------------------------------
// task_scheduler_init
//------------------------------------------------------------------------

using namespace internal;

void task_scheduler_init::initialize( int number_of_threads ) {
    initialize( number_of_threads, 0 );
}

void task_scheduler_init::initialize( int number_of_threads, stack_size_type thread_stack_size ) {
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    uintptr_t new_mode = thread_stack_size & propagation_mode_mask;
#endif
    thread_stack_size &= ~(stack_size_type)propagation_mode_mask;
    if( number_of_threads!=deferred ) {
        __TBB_ASSERT_RELEASE( !my_scheduler, "task_scheduler_init already initialized" );
        __TBB_ASSERT_RELEASE( number_of_threads==automatic || number_of_threads > 0,
                    "number_of_threads for task_scheduler_init must be automatic or positive" );
        internal::generic_scheduler *s = governor::init_scheduler( number_of_threads, thread_stack_size, /*auto_init=*/false );
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
        if ( s->master_outermost_level() ) {
            uintptr_t &vt = s->default_context()->my_version_and_traits;
            uintptr_t prev_mode = vt & task_group_context::exact_exception ? propagation_mode_exact : 0;
            vt = new_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
                 : new_mode & propagation_mode_captured ? vt & ~task_group_context::exact_exception : vt;
            // Use the least significant bit of the scheduler pointer to store the previous mode.
            // This is necessary when components compiled with different compilers and/or
            // TBB versions initialize the same thread.
            my_scheduler = static_cast<scheduler*>((generic_scheduler*)((uintptr_t)s | prev_mode));
        }
        else
#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
            my_scheduler = s;
    } else {
        __TBB_ASSERT_RELEASE( !thread_stack_size, "deferred initialization ignores stack size setting" );
    }
}
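
// Note how initialize() splits its second argument: the low bits selected by
// propagation_mode_mask carry exception-propagation flags, and the remainder is
// the real stack size. Callers normally never see this; they just construct the
// object. A usage sketch with the public constructor:
#include "tbb/task_scheduler_init.h"

void init_usage_demo() {
    // 4 worker slots, 1 MiB worker stack; the propagation-mode bits described
    // above are set by the headers, not by user code.
    tbb::task_scheduler_init init(4, 1 << 20);
    // ... parallel algorithms run here ...
}   // destructor calls terminate(), which funnels into internal_terminate below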

bool task_scheduler_init::internal_terminate( bool blocking ) {
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    uintptr_t prev_mode = (uintptr_t)my_scheduler & propagation_mode_exact;
    my_scheduler = (scheduler*)((uintptr_t)my_scheduler & ~(uintptr_t)propagation_mode_exact);
#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
    generic_scheduler* s = static_cast<generic_scheduler*>(my_scheduler);
    my_scheduler = NULL;
    __TBB_ASSERT_RELEASE( s, "task_scheduler_init::terminate without corresponding task_scheduler_init::initialize()");
#if __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS
    if ( s->master_outermost_level() ) {
        uintptr_t &vt = s->default_context()->my_version_and_traits;
        vt = prev_mode & propagation_mode_exact ? vt | task_group_context::exact_exception
             : vt & ~task_group_context::exact_exception;
    }
#endif /* __TBB_TASK_GROUP_CONTEXT && TBB_USE_EXCEPTIONS */
    return governor::terminate_scheduler(s, blocking);
}

void task_scheduler_init::terminate() {
    internal_terminate(/*blocking_terminate=*/false);
}

#if __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
bool task_scheduler_init::internal_blocking_terminate( bool throwing ) {
    bool ok = internal_terminate( /*blocking_terminate=*/true );
#if TBB_USE_EXCEPTIONS
    if( throwing && !ok )
        throw_exception( eid_blocking_thread_join_impossible );
#else
    suppress_unused_warning( throwing );
#endif
    return ok;
}
#endif // __TBB_SUPPORTS_WORKERS_WAITING_IN_TERMINATE
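
// Usage sketch for the blocking form above: with the preview feature enabled,
// the public blocking_terminate() waits for worker threads to finish, and its
// nothrow overload reports failure instead of throwing (assuming the
// TBB_PREVIEW_BLOCKING_TERMINATE macro gates the feature in this TBB version):
#define TBB_PREVIEW_BLOCKING_TERMINATE 1
#include "tbb/task_scheduler_init.h"
#include <new>   // std::nothrow

void blocking_terminate_demo() {
    tbb::task_scheduler_init init(tbb::task_scheduler_init::automatic);
    bool joined = init.blocking_terminate(std::nothrow);   // false if join impossible
    (void)joined;
}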

int task_scheduler_init::default_num_threads () {
    return governor::default_num_threads();
}

} // namespace tbb