#ifndef _TBB_scheduler_H
#define _TBB_scheduler_H

#include "../rml/include/rml_tbb.h"

#if __TBB_SURVIVE_THREAD_SWITCH
    /* ... */
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

template<typename SchedulerTraits> class custom_scheduler;

#define EmptyTaskPool  ((task**)0)
#define LockedTaskPool ((task**)~(intptr_t)0)

#if __TBB_PREVIEW_CRITICAL_TASKS
    bool has_taken_critical_task : 1;   // bit-field member of scheduler_properties
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
#if __TBB_SCHEDULER_OBSERVER
    observer_proxy* my_last_global_observer;
#endif

#if __TBB_ARENA_OBSERVER
    observer_proxy* my_last_local_observer;
#endif

#if __TBB_TASK_PRIORITY
    volatile intptr_t *my_ref_top_priority;
    volatile uintptr_t *my_ref_reload_epoch;
#endif
    static bool is_version_3_task( task& t ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        return (t.prefix().extra_state & 0x7)>=0x1;
#else
        return (t.prefix().extra_state & 0x0F)>=0x1;
#endif
    }
    uintptr_t my_rsb_stealing_threshold;

#if __TBB_HOARD_NONLOCAL_TASKS
    task* my_nonlocal_free_list;
#endif

#if __TBB_COUNT_TASK_NODES
    intptr_t my_task_node_count;
#endif
#if __TBB_TASK_ISOLATION
    /* ... */
#endif

#if __TBB_PREVIEW_CRITICAL_TASKS
    bool handled_as_critical( task& t );
#endif

#if TBB_USE_ASSERT > 1
    /* ... */
#endif

    template<free_task_hint h>
    void free_task( task& t );

#if __TBB_COUNT_TASK_NODES
    intptr_t get_task_node_count( bool count_arena_workers = false );
#endif
#if __TBB_TASK_GROUP_CONTEXT
    /* ... */
    uintptr_t my_context_state_propagation_epoch;
#endif

#if __TBB_TASK_PRIORITY
    inline intptr_t effective_reference_priority () const;

    task* my_offloaded_tasks;
    task** my_offloaded_task_list_tail_link;
    uintptr_t my_local_reload_epoch;
    volatile bool my_pool_reshuffling_pending;

    inline void offload_task ( task& t, intptr_t task_priority );
#endif /* __TBB_TASK_PRIORITY */

    void cleanup_local_context_list ();
    template <typename T>
    /* ... */

    static void assert_context_valid( const task_group_context *ctx ) {
        /* ... */
        __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed");
        static const char *msg = "task_group_context is invalid";
        /* ... */
    }
#if __TBB_TASK_PRIORITY
    /* ... */
#endif
#if TBB_USE_ASSERT > 1
    /* ... */
#endif

    ::rml::server::execution_resource_t master_exec_resource;

#if __TBB_TASK_GROUP_CONTEXT
    /* ... */
#endif
#if __TBB_SURVIVE_THREAD_SWITCH
    cilk_state_t my_cilk_state;
#endif

    mutable statistics_counters my_counters;
#if __TBB_TASK_GROUP_CONTEXT
    /* ... */
#endif

inline void generic_scheduler::deallocate_task( task& t ) {
    task_prefix& p = t.prefix();
    /* ... */
    p.extra_state = 0xFF;
    /* ... */
#if __TBB_COUNT_TASK_NODES
    --my_task_node_count;
#endif
}

#if __TBB_COUNT_TASK_NODES
inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
    return my_task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0);
}
#endif
inline void generic_scheduler::commit_spawned_tasks( size_t new_tail ) {
    __TBB_ASSERT( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );
    /* ... */
}

inline void generic_scheduler::commit_relocated_tasks( size_t new_tail ) {
    __TBB_ASSERT( is_local_task_pool_quiescent(),
                  "Task pool must be locked when calling commit_relocated_tasks()" );
    /* ... */
}
template<free_task_hint hint>
void generic_scheduler::free_task( task& t ) {
#if __TBB_HOARD_NONLOCAL_TASKS
    /* ... */
#endif
    /* ... */
    } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {
        /* ... */
#if __TBB_HOARD_NONLOCAL_TASKS
        p.next = my_nonlocal_free_list;
        my_nonlocal_free_list = &t;
#endif
    /* ... */
}
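free_task is parameterized by a free_task_hint (see the hint entries further down: local_task, small_task, their bitwise-OR, and the no-cache variant) precisely so that callers who statically know a task's origin let the compiler prune the runtime tests above. The following is a schematic restatement of that idea, not TBB's code; the toy_* names and the exact branch conditions are simplifications.

    // h is a compile-time constant, so branches the hint rules out disappear.
    enum toy_hint { toy_no_hint = 0, toy_local = 1, toy_small = 2,
                    toy_small_local = toy_local | toy_small };

    template<toy_hint h>
    void toy_free_task( void* t, void* origin, void* self ) {
        (void)t;
        if ( h == toy_small_local || origin == self ) {
            // task came from this scheduler's own small-task pool:
            // push it back onto the local free list (my_free_list)
        } else if ( (h & toy_small) || origin != nullptr ) {
            // small task allocated by another scheduler: return it to its owner
            // (free_nonlocal_small_task, or cache it on my_nonlocal_free_list)
        } else {
            // big task: hand the memory back to the allocator (deallocate_task)
        }
    }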
#if __TBB_TASK_PRIORITY
inline intptr_t generic_scheduler::effective_reference_priority () const {
    return ( /* ... */
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
             && my_arena->my_concurrency_mode!=arena_base::cm_enforced_global
#endif
           ) ? *my_ref_top_priority : my_arena->my_top_priority;
}
inline void generic_scheduler::offload_task ( task& t, intptr_t /*task_priority*/ ) {
    /* ... */
    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );
    /* ... */
    t.prefix().next_offloaded = my_offloaded_tasks;
    my_offloaded_tasks = &t;
}
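Offloaded tasks are kept on an intrusive, newest-first singly linked list threaded through the task prefix, while my_offloaded_task_list_tail_link remembers the address of the last node's link so that the asserted invariant (a non-NULL tail link that itself points to a NULL link) holds and the whole chain can be spliced elsewhere in O(1). A minimal sketch of that list shape with hypothetical toy_* names:

    #include <cassert>

    struct toy_task { toy_task* next_offloaded = nullptr; };

    struct toy_offload_list {
        toy_task*  head = nullptr;       // plays the role of my_offloaded_tasks
        toy_task** tail_link = nullptr;  // plays the role of my_offloaded_task_list_tail_link

        void push( toy_task& t ) {
            // same invariant as the __TBB_ASSERT above: the tail link, once set,
            // always points at a null 'next' field
            assert( !tail_link || *tail_link == nullptr );
            if ( !head )
                tail_link = &t.next_offloaded;  // first node stays the tail
            t.next_offloaded = head;            // push-front, as in offload_task
            head = &t;
        }
    };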
#if __TBB_PREVIEW_CRITICAL_TASKS
class critical_task_count_guard : internal::no_copy {
public:
    critical_task_count_guard( scheduler_properties& properties, task& t )
        : my_properties(properties),
          my_original_critical_task_state(properties.has_taken_critical_task) {
        /* ... */
    }
    ~critical_task_count_guard() {
        my_properties.has_taken_critical_task = my_original_critical_task_state;
    }
private:
    scheduler_properties& my_properties;
    bool my_original_critical_task_state;
};
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
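critical_task_count_guard is a plain RAII save/restore of the has_taken_critical_task bit: the constructor records the current value and the destructor writes it back on every exit path. A hypothetical use, assuming the declarations above are in scope and that calling execute() directly is acceptable for illustration:

    // Illustrative only: run one task under the guard so the flag is restored
    // even if execute() throws.
    void toy_run_under_guard( scheduler_properties& props, task& t ) {
        critical_task_count_guard guard( props, t );
        t.execute();                 // placeholder for the real dispatch step
    }                                // destructor restores has_taken_critical_task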
#if __TBB_FP_CONTEXT || __TBB_TASK_GROUP_CONTEXT
template <bool report_tasks>
class context_guard_helper {
#if __TBB_TASK_GROUP_CONTEXT
    const task_group_context *curr_ctx;
#endif
    cpu_ctl_env guard_cpu_ctl_env;
    cpu_ctl_env curr_cpu_ctl_env;
public:
    context_guard_helper() /* ... */ {
        guard_cpu_ctl_env.get_env();
        curr_cpu_ctl_env = guard_cpu_ctl_env;
    }
    ~context_guard_helper() {
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env )
            guard_cpu_ctl_env.set_env();
#if __TBB_TASK_GROUP_CONTEXT
        if (report_tasks && curr_ctx)
            /* ... */
#endif
    }
    void set_ctx( const task_group_context *ctx ) {
        generic_scheduler::assert_context_valid(ctx);
        const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>(&ctx->my_cpu_ctl_env);
#if __TBB_TASK_GROUP_CONTEXT
        if(ctx != curr_ctx) {
#endif
            if ( ctl != curr_cpu_ctl_env ) {
                curr_cpu_ctl_env = ctl;
                curr_cpu_ctl_env.set_env();
            }
        /* ... */
    }
    /* ... a member that restores the captured environment ... */
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
            guard_cpu_ctl_env.set_env();
            curr_cpu_ctl_env = guard_cpu_ctl_env;
        }
    /* ... */
};
#endif /* __TBB_FP_CONTEXT || __TBB_TASK_GROUP_CONTEXT */
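Taken together, context_guard_helper is an RAII helper: it captures the FPU control environment on construction, switches it (and, when report_tasks is true, the reported task_group_context) per task through set_ctx(), and restores the captured settings when it goes out of scope. A hedged sketch of how a dispatch loop could use it; the toy_drain() function and its get_next_task callback are placeholders rather than the scheduler's real loop, and the sketch assumes __TBB_TASK_GROUP_CONTEXT is enabled so that the task prefix carries a context pointer.

    // Hypothetical dispatch fragment, assuming the declarations above are in scope.
    template <bool report_tasks>
    void toy_drain( task* (*get_next_task)() ) {
        context_guard_helper<report_tasks> context_guard;   // capture current FP env
        while ( task* t = get_next_task() ) {
            // switches FP settings (and the reported context) only when this
            // task's task_group_context differs from the previous one
            context_guard.set_ctx( t->prefix().context );
            t->execute();
        }
    }   // destructor restores the environment captured at entry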
bool can_steal()
Returns true if stealing is allowed.
bool my_auto_initialized
True if *this was created by automatic TBB initialization.
unsigned char
Reserved bits.
unsigned max_threads_in_arena()
Returns the concurrency limit of the current arena.
void publish_task_pool()
Used by workers to enter the task pool.
#define __TBB_CONTEXT_ARG(arg1, context)
unsigned my_num_workers_allotted
The number of workers that have been marked out by the resource manager to service the arena.
void set_ctx(__TBB_CONTEXT_ARG1(task_group_context *))
Memory prefix to a task object.
unsigned short affinity_id
An id as used for specifying affinity.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
task * steal_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempts to steal a task from a randomly chosen thread/scheduler.
void attach_arena(arena *, size_t index, bool is_master)
Disable caching for a small task.
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Task is known to have been allocated by this scheduler.
static generic_scheduler * create_master(arena *a)
Initialize a scheduler for a master thread.
task object is freshly allocated or recycled.
#define __TBB_ISOLATION_EXPR(isolation)
#define ITT_TASK_BEGIN(type, name, id)
task ** lock_task_pool(arena_slot *victim_arena_slot) const
Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
task * steal_task_from(__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
Steal task from another scheduler's ready pool.
bool is_quiescent_local_task_pool_reset() const
auto first(Container &c) -> decltype(begin(c))
Used to form groups of tasks.
A fast random number generator.
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Bit-field representing properties of a scheduler.
Task is known to be a small task.
FastRandom my_random
Random number generator used for picking a random victim from which to steal.
bool cleanup_master(bool blocking_terminate)
Perform necessary cleanup when a master thread stops using TBB.
static const size_t null_arena_index
bool is_worker() const
True if running on a worker thread, false otherwise.
__TBB_atomic size_t head
Index of the first ready task in the deque.
Base class for user-defined tasks.
void release_task_pool() const
Unlocks the local task pool.
Work stealing task scheduler.
task * my_innermost_running_task
Innermost task whose task::execute() is running. A dummy task on the outermost level.
void unlock_task_pool(arena_slot *victim_arena_slot, task **victim_task_pool) const
Unlocks victim's task pool.
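lock_task_pool(), steal_task_from() and unlock_task_pool() above compose into the usual try-lock / take / release pattern around a victim's deque. The sketch below shows only that flow; whether the lock is taken by steal_task_from() itself or by its caller is an implementation detail this index does not show, and the sketch assumes access to these internal members purely for illustration.

    // Schematic only: not the real steal path.
    task* toy_try_steal_once( generic_scheduler& thief, arena_slot& victim ) {
        task** pool = thief.lock_task_pool( &victim );   // may return NULL
        if ( !pool )
            return NULL;                                 // nothing to steal / not lockable
        task* stolen = /* take the oldest ready task near 'head' */ NULL;
        thief.unlock_task_pool( &victim, pool );
        return stolen;
    }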
intptr_t isolation_tag
A tag for task isolation.
unsigned my_num_slots
The number of slots in the arena.
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
task is running, and will be destroyed after method execute() completes.
task * get_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Get a task from the local pool.
market * my_market
The market I am in.
free_task_hint
Optimization hint to free_task that enables it omit unnecessary tests and code.
void free_scheduler()
Destroy and deallocate this scheduler object.
task * get_mailbox_task(__TBB_ISOLATION_EXPR(isolation_tag isolation))
Attempt to get a task from the mailbox.
A lock that occupies a single byte.
size_t my_arena_index
Index of the arena slot the scheduler occupies now, or occupied last time.
bool is_critical(task &t)
static void cleanup_worker(void *arg, bool worker)
Perform necessary cleanup when a worker thread finishes.
arena_slot * my_arena_slot
Pointer to the slot in the arena we own at the moment.
#define __TBB_ISOLATION_ARG(arg1, isolation)
size_t prepare_task_pool(size_t n)
Makes sure that the task pool can accommodate at least n more elements.
void __TBB_store_relaxed(volatile T &location, V value)
generic_scheduler(market &)
void init_stack_info()
Sets up the data necessary for the stealing limiting heuristics.
bool is_quiescent_local_task_pool_empty() const
task **__TBB_atomic task_pool
void attach(mail_outbox &putter)
Attach inbox to a corresponding outbox.
void commit_spawned_tasks(size_t new_tail)
Makes newly spawned tasks visible to thieves.
__TBB_atomic intptr_t my_small_task_count
Number of small tasks that have been allocated by this scheduler.
task * my_free_list
Free list of small tasks that can be reused.
__TBB_atomic size_t tail
Index of the element following the last ready task in the deque.
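head and tail describe the classic work-stealing split of one deque: the owner pushes and pops at the tail (LIFO, cache-hot), while thieves take from the head (FIFO, the oldest work), so the two ends only contend when the pool is nearly empty. A toy model of just the index arithmetic; the real arena_slot adds atomics, locking and pool relocation on top of this.

    #include <cstddef>

    struct toy_deque_indices {
        std::size_t head = 0;   // next task a thief would take (oldest)
        std::size_t tail = 0;   // one past the task spawned most recently

        bool        empty() const { return head >= tail; }
        std::size_t size()  const { return empty() ? 0 : tail - head; }

        // Callers must check empty() first; no synchronization is modelled here.
        std::size_t owner_pop()  { return --tail; }   // owner works LIFO at the tail
        std::size_t thief_take() { return head++; }   // thieves consume FIFO at the head
    };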
void deallocate_task(task &t)
Return task object to the memory allocator.
void local_spawn(task *first, task *&next)
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
static bool is_version_3_task(task &t)
void enqueue(task &, void *reserved) __TBB_override
For internal use only.
#define GATHER_STATISTIC(x)
bool is_local_task_pool_quiescent() const
Class representing source of mail.
void commit_relocated_tasks(size_t new_tail)
Makes relocated tasks visible to thieves and releases the local task pool.
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
static bool is_proxy(const task &t)
True if t is a task_proxy.
void assert_task_pool_valid() const
bool master_outermost_level() const
True if the scheduler is on the outermost dispatch level in a master thread.
bool type
Indicates that a scheduler acts as a master or a worker.
void spawn(task &first, task *&next) __TBB_override
For internal use only.
intptr_t my_priority
Priority level of the task group (in normalized representation)
task * prepare_for_spawning(task *t)
Checks if t is affinitized to another thread, and if so, bundles it as proxy.
task is in ready pool, or is going to be put there, or was just taken off.
static task * plugged_return_list()
Special value used to mark my_return_list as not taking any more entries.
task object is on free list, or is going to be put there, or was just taken off.
bool outermost
Indicates that a scheduler is on outermost level.
void acquire_task_pool() const
Locks the local task pool.
void poison_pointer(T *__TBB_atomic &)
scheduler_properties my_properties
#define __TBB_store_release
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
#define ITT_NOTIFY(name, obj)
void attach_mailbox(affinity_id id)
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
#define __TBB_CONTEXT_ARG1(context)
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
task * my_dummy_task
Fake root task created by slave threads.
T __TBB_load_relaxed(const volatile T &location)
mail_outbox & mailbox(affinity_id id)
Get reference to mailbox corresponding to given affinity_id.
task * my_return_list
List of small tasks that have been returned to this scheduler by other schedulers.
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
static const size_t quick_task_size
If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
void __TBB_EXPORTED_FUNC NFS_Free(void *)
Free memory allocated by NFS_Allocate.
void * __TBB_get_bsp()
Retrieves the current RSE backing store pointer. IA64 specific.
static generic_scheduler * create_worker(market &m, size_t index)
Initialize a scheduler for a worker thread.
virtual task * receive_or_steal_task(__TBB_ISOLATION_ARG(__TBB_atomic reference_count &completion_ref_count, isolation_tag isolation))=0
Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption).
virtual void local_wait_for_all(task &parent, task *child)=0
task & allocate_task(size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
Allocate task object, either from the heap or a free list.
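allocate_task() together with quick_task_size, my_free_list, my_small_task_count and NFS_Free() (all listed in this index) describes a two-tier policy: sufficiently small task objects are recycled through a per-scheduler free list, everything else goes to the scalable allocator. The sketch below is schematic bookkeeping only; the toy_* names, the 256-byte cut-off and the use of malloc/free in place of the NFS allocator are assumptions made for brevity.

    #include <cstddef>
    #include <cstdlib>

    struct toy_node { toy_node* next; };

    struct toy_small_task_pool {
        static const std::size_t quick_task_size = 256;   // illustrative cut-off
        toy_node* free_list = nullptr;                    // plays the role of my_free_list

        void* allocate( std::size_t bytes ) {
            if ( bytes <= quick_task_size && free_list ) {
                toy_node* n = free_list;                  // reuse a cached small task
                free_list = n->next;
                return n;
            }
            return std::malloc( bytes < sizeof(toy_node) ? sizeof(toy_node) : bytes );
        }

        void deallocate( void* p, std::size_t bytes ) {
            if ( bytes <= quick_task_size ) {             // cache small tasks for reuse
                toy_node* n = static_cast<toy_node*>( p );
                n->next = free_list;
                free_list = n;
            } else {
                std::free( p );                           // big tasks go straight back
            }
        }
    };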
void local_spawn_root_and_wait(task *first, task *&next)
state_type state() const
Current execution state.
intptr_t reference_count
A reference count.
internal::task_prefix & prefix(internal::version_tag *=NULL) const
Get reference to corresponding task_prefix.
long my_ref_count
Reference count for scheduler.
void leave_task_pool()
Leave the task pool.
void reset_task_pool_and_leave()
Resets head and tail indices to 0, and leaves task pool.
Data structure to be inherited by the types that can form intrusive lists.
const size_t task_prefix_reservation_size
Number of bytes reserved for a task prefix.
A scheduler with a customized evaluation loop.
Bitwise-OR of local_task and small_task.
unsigned num_workers_active()
The number of workers active in the arena.
void free_task(task &t)
Put task on free list.
void spawn_root_and_wait(task &first, task *&next) __TBB_override
For internal use only.
bool is_task_pool_published() const
uintptr_t my_stealing_threshold
Position in the call stack specifying its maximal filling when stealing is still allowed.
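my_stealing_threshold, init_stack_info() and can_steal() form a stack-overflow guard: a low-water mark is computed once per thread, and stealing (which deepens recursion) is refused once the current stack position passes it. A minimal sketch of the same idea; the half-stack reservation and the use of a local variable's address as an approximate stack pointer are assumptions, not TBB's exact heuristic.

    #include <cstddef>
    #include <cstdint>

    class toy_steal_limiter {
        std::uintptr_t m_threshold;   // analogous to my_stealing_threshold
    public:
        // stack_base: an address near the bottom-of-stack frame at init time;
        // stack_size: the thread's stack size. Half of it is reserved here.
        toy_steal_limiter( const void* stack_base, std::size_t stack_size )
            : m_threshold( reinterpret_cast<std::uintptr_t>(stack_base) - stack_size / 2 ) {}

        // Assumes a downward-growing stack, as on mainstream platforms.
        bool can_steal() const {
            int probe;   // its address approximates the current stack pointer
            return reinterpret_cast<std::uintptr_t>(&probe) > m_threshold;
        }
    };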
void __TBB_store_with_release(volatile T &location, V value)
affinity_id my_affinity_id
The mailbox id assigned to this scheduler.
void nested_arena_entry(arena *, size_t)
bool worker_outermost_level() const
True if the scheduler is on the outermost dispatch level in a worker thread.
static const kind_type dying
bool outermost_level() const
True if the scheduler is on the outermost dispatch level.
void free_nonlocal_small_task(task &t)
Free a small task t that was allocated by a different scheduler.
static const size_t min_task_pool_size
arena * my_arena
The arena that I own (if master) or am servicing at the moment (if worker)