Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
scheduler.h
/*
    Copyright (c) 2005-2019 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef _TBB_scheduler_H
#define _TBB_scheduler_H

#include "scheduler_common.h"
#include "tbb/spin_mutex.h"
#include "mailbox.h"
#include "tbb_misc.h" // for FastRandom
#include "itt_notify.h"
#include "../rml/include/rml_tbb.h"

#include "intrusive_list.h"

#if __TBB_SURVIVE_THREAD_SWITCH
#include "cilk-tbb-interop.h"
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

namespace tbb {
namespace internal {

template<typename SchedulerTraits> class custom_scheduler;

//------------------------------------------------------------------------
// generic_scheduler
//------------------------------------------------------------------------

#define EmptyTaskPool ((task**)0)
#define LockedTaskPool ((task**)~(intptr_t)0)

//! Bit-field representing properties of a scheduler.
struct scheduler_properties {
    static const bool worker = false;
    static const bool master = true;
    //! Indicates that a scheduler acts as a master or a worker.
    bool type : 1;
    //! Indicates that a scheduler is on the outermost level.
    bool outermost : 1;
#if __TBB_PREVIEW_CRITICAL_TASKS
    //! Indicates that a scheduler is in the middle of executing a critical task.
    bool has_taken_critical_task : 1;
    //! Reserved bits
    unsigned char : 5;
#else
    //! Reserved bits
    unsigned char : 6;
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */
};

struct scheduler_state {
    //! Index of the arena slot the scheduler occupies now, or occupied last time.
    size_t my_arena_index; // TODO: make it unsigned and pair with my_affinity_id to fit into cache line

    //! Pointer to the slot in the arena we own at the moment.
    arena_slot* my_arena_slot;

    //! The arena that I own (if master) or am servicing at the moment (if worker).
    arena* my_arena;

    //! Innermost task whose task::execute() is running. A dummy task on the outermost level.
    task* my_innermost_running_task;

    //! Mailbox for tasks affinitized to this scheduler.
    mail_inbox my_inbox;

    //! The mailbox id assigned to this scheduler.
    /** The id is assigned upon first entry into the arena. */
    affinity_id my_affinity_id;

    scheduler_properties my_properties;

#if __TBB_SCHEDULER_OBSERVER
    //! Last observer in the global observers list processed by this scheduler.
    observer_proxy* my_last_global_observer;
#endif

#if __TBB_ARENA_OBSERVER
    //! Last observer in the local observers list processed by this scheduler.
    observer_proxy* my_last_local_observer;
#endif
#if __TBB_TASK_PRIORITY
    //! Pointer to the priority used as the reference point for this scheduler (market's or arena's).
    volatile intptr_t *my_ref_top_priority;

    //! Pointer to the reload epoch counter paired with my_ref_top_priority.
    volatile uintptr_t *my_ref_reload_epoch;
#endif /* __TBB_TASK_PRIORITY */
};

//! Work stealing task scheduler.
class generic_scheduler: public scheduler
                       , public ::rml::job
                       , public intrusive_list_node
                       , public scheduler_state {
public: // almost every class in TBB uses generic_scheduler

    //! If sizeof(task) is <=quick_task_size, it is handled on a free list instead of malloc'd.
    static const size_t quick_task_size = 256 - task_prefix_reservation_size;

    static bool is_version_3_task( task& t ) {
#if __TBB_PREVIEW_CRITICAL_TASKS
        return (t.prefix().extra_state & 0x7) >= 0x1;
#else
        return (t.prefix().extra_state & 0x0F) >= 0x1;
#endif
    }

    //! Position in the call stack specifying its maximal filling when stealing is still allowed.
    uintptr_t my_stealing_threshold;
#if __TBB_ipf
    //! Position in the RSE backup area specifying its maximal filling when stealing is still allowed.
    uintptr_t my_rsb_stealing_threshold;
#endif

    static const size_t null_arena_index = ~size_t(0);

    inline bool is_task_pool_published () const;

    inline bool is_local_task_pool_quiescent () const;

    inline bool is_quiescent_local_task_pool_empty () const;

    inline bool is_quiescent_local_task_pool_reset () const;

    //! The market I am in.
    market* my_market;

    //! Random number generator used for picking a random victim from which to steal.
    FastRandom my_random;

    //! Free list of small tasks that can be reused.
    task* my_free_list;

#if __TBB_HOARD_NONLOCAL_TASKS
    //! Free list of small tasks allocated by other schedulers and cached here.
    task* my_nonlocal_free_list;
#endif

    //! Fake root task created by slave threads.
    task* my_dummy_task;

    //! Reference count for scheduler.
    long my_ref_count;

    inline void attach_mailbox( affinity_id id );

    /* A couple of bools can be located here because space is otherwise just padding after my_affinity_id. */

    //! True if *this was created by automatic TBB initialization.
    bool my_auto_initialized;

#if __TBB_COUNT_TASK_NODES
    //! Net number of task objects allocated by this scheduler and not yet freed.
    intptr_t my_task_node_count;
#endif /* __TBB_COUNT_TASK_NODES */

    //! Sets up the data necessary for the stealing limiting heuristics.
    void init_stack_info ();

    //! Returns true if stealing is allowed.
    bool can_steal () {
        int anchor;
        // TODO IDEA: Add performance warning?
#if __TBB_ipf
        return my_stealing_threshold < (uintptr_t)&anchor && (uintptr_t)__TBB_get_bsp() < my_rsb_stealing_threshold;
#else
        return my_stealing_threshold < (uintptr_t)&anchor;
#endif
    }
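
    // Illustrative sketch (an assumption, not code from init_stack_info() in scheduler.cpp):
    // the stealing-limiting heuristic only needs my_stealing_threshold to lie somewhere between
    // the stack base and its end, e.g. at half depth, so that can_steal() starts returning false
    // once the dispatch loop has consumed too much stack:
    //
    //     void set_stealing_threshold_sketch( uintptr_t stack_base, size_t stack_size ) {
    //         // stacks grow downwards on the supported platforms, so the watermark lies below the base
    //         my_stealing_threshold = stack_base - stack_size / 2;
    //     }
    //
    // can_steal() then simply compares the address of a local variable against this watermark.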

    //! Used by workers to enter the task pool.
    void publish_task_pool();

    //! Leave the task pool.
    void leave_task_pool();

    //! Resets head and tail indices to 0, and leaves task pool.
    inline void reset_task_pool_and_leave ();

    //! Locks victim's task pool, and returns pointer to it. The pointer can be NULL.
    task** lock_task_pool( arena_slot* victim_arena_slot ) const;

    //! Unlocks victim's task pool.
    void unlock_task_pool( arena_slot* victim_arena_slot, task** victim_task_pool ) const;

    //! Locks the local task pool.
    void acquire_task_pool() const;

    //! Unlocks the local task pool.
    void release_task_pool() const;

    //! Makes newly spawned tasks visible to thieves.
    inline void commit_spawned_tasks( size_t new_tail );

    //! Makes relocated tasks visible to thieves and releases the local task pool.
    inline void commit_relocated_tasks( size_t new_tail );

    //! Get a task from the local pool.
    task* get_task( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );

    //! Get a task from the local pool at specified location T.
#if __TBB_TASK_ISOLATION
    task* get_task( size_t T, isolation_tag isolation, bool& tasks_omitted );
#else
    task* get_task( size_t T );
#endif /* __TBB_TASK_ISOLATION */

    //! Attempt to get a task from the mailbox.
    task* get_mailbox_task( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );

    //! True if t is a task_proxy.
    static bool is_proxy( const task& t ) {
        return t.prefix().extra_state == es_task_proxy;
    }

    //! Attempts to steal a task from a randomly chosen thread/scheduler.
    task* steal_task( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );

    //! Steal task from another scheduler's ready pool.
    task* steal_task_from( __TBB_ISOLATION_ARG( arena_slot& victim_arena_slot, isolation_tag isolation ) );

#if __TBB_PREVIEW_CRITICAL_TASKS
    //! Returns a pending critical task, if any.
    task* get_critical_task( __TBB_ISOLATION_EXPR(isolation_tag isolation) );

    //! Routes the task to the critical task stream if it is critical; returns true if it was.
    bool handled_as_critical( task& t );
#endif

    //! Initial size of the task deque.
    static const size_t min_task_pool_size = 64;

    //! Makes sure that the task pool can accommodate at least n more elements.
    size_t prepare_task_pool( size_t n );

    //! Initialize a scheduler for a master thread.
    static generic_scheduler* create_master( arena* a );

    //! Perform necessary cleanup when a master thread stops using TBB.
    bool cleanup_master( bool blocking_terminate );

    //! Initialize a scheduler for a worker thread.
    static generic_scheduler* create_worker( market& m, size_t index );

    //! Perform necessary cleanup when a worker thread finishes.
    static void cleanup_worker( void* arg, bool worker );

protected:
    template<typename SchedulerTraits> friend class custom_scheduler;

public:
#if TBB_USE_ASSERT > 1
    //! Check that the scheduler's task pool is in a consistent state.
    void assert_task_pool_valid() const;
#else
    void assert_task_pool_valid() const {}
#endif /* TBB_USE_ASSERT <= 1 */

    void attach_arena( arena*, size_t index, bool is_master );
    void nested_arena_entry( arena*, size_t );
    void nested_arena_exit();
    void wait_until_empty();

    //! For internal use only.
    void spawn( task& first, task*& next ) __TBB_override;

    //! For internal use only.
    void spawn_root_and_wait( task& first, task*& next ) __TBB_override;

    //! For internal use only.
    void enqueue( task&, void* reserved ) __TBB_override;

    void local_spawn( task* first, task*& next );
    void local_spawn_root_and_wait( task* first, task*& next );
    virtual void local_wait_for_all( task& parent, task* child ) = 0;

    //! Destroy and deallocate this scheduler object.
    void free_scheduler();

    //! Allocate task object, either from the heap or a free list.
    task& allocate_task( size_t number_of_bytes,
                         __TBB_CONTEXT_ARG( task* parent, task_group_context* context ) );

    //! Put task on free list.
    template<free_task_hint h>
    void free_task( task& t );

    //! Return task object to the memory allocator.
    inline void deallocate_task( task& t );

    //! True if running on a worker thread, false otherwise.
    inline bool is_worker() const;

    //! True if the scheduler is on the outermost dispatch level.
    inline bool outermost_level() const;

    //! True if the scheduler is on the outermost dispatch level in a master thread.
    inline bool master_outermost_level () const;

    //! True if the scheduler is on the outermost dispatch level in a worker thread.
    inline bool worker_outermost_level () const;

    //! Returns the concurrency limit of the current arena.
    unsigned max_threads_in_arena();

#if __TBB_COUNT_TASK_NODES
    intptr_t get_task_node_count( bool count_arena_workers = false );
#endif /* __TBB_COUNT_TASK_NODES */

    //! Special value used to mark my_return_list as not taking any more entries.
    static task* plugged_return_list() {return (task*)(intptr_t)(-1);}

    //! Number of small tasks that have been allocated by this scheduler.
    __TBB_atomic intptr_t my_small_task_count;

    //! List of small tasks that have been returned to this scheduler by other schedulers.
    // TODO IDEA: see if putting my_return_list on separate cache line improves performance
    task* my_return_list;

    //! Try getting a task from other threads (via mailbox, stealing, FIFO queue, orphans adoption).
    virtual task* receive_or_steal_task( __TBB_ISOLATION_ARG( __TBB_atomic reference_count& completion_ref_count, isolation_tag isolation ) ) = 0;

    //! Free a small task t that was allocated by a different scheduler.
    void free_nonlocal_small_task( task& t );

#if __TBB_TASK_GROUP_CONTEXT

    inline task_group_context* default_context ();

    //! Padding isolating thread-local members from members that can be written to by other threads.
    char _padding1[NFS_MaxLineSize - sizeof(context_list_node_t)];

    //! Head of the thread-specific list of task group contexts.
    context_list_node_t my_context_list_head;

    //! Mutex protecting access to the list of task group contexts.
    // TODO: check whether it can be deadly preempted and replace by spinning/sleeping mutex
    spin_mutex my_context_list_mutex;

    //! Last state-propagation epoch known to this thread.
    uintptr_t my_context_state_propagation_epoch;

    //! Flag indicating that the local context list is being modified by its owner thread.
    tbb::atomic<uintptr_t> my_local_ctx_list_update;

#if __TBB_TASK_PRIORITY
    inline intptr_t effective_reference_priority () const;

    // TODO: move into slots and fix is_out_of_work
    //! List of tasks postponed because their priority is lower than the effective reference priority.
    task* my_offloaded_tasks;

    //! Points to the last offloaded task's next_offloaded link.
    task** my_offloaded_task_list_tail_link;

    //! Last reload epoch observed by this scheduler.
    uintptr_t my_local_reload_epoch;

    volatile bool my_pool_reshuffling_pending;

    task* reload_tasks( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );

    task* reload_tasks( task*& offloaded_tasks, task**& offloaded_task_list_link, __TBB_ISOLATION_ARG( intptr_t top_priority, isolation_tag isolation ) );

    task* winnow_task_pool ( __TBB_ISOLATION_EXPR( isolation_tag isolation ) );

    task *get_task_and_activate_task_pool( size_t H0 , __TBB_ISOLATION_ARG( size_t T0, isolation_tag isolation ) );

    //! Puts the task into the offloaded-tasks list.
    inline void offload_task ( task& t, intptr_t task_priority );
#endif /* __TBB_TASK_PRIORITY */

    void cleanup_local_context_list ();

    //! Propagates the given state change to the contexts registered by this thread.
    template <typename T>
    void propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state );

    // check consistency
    static void assert_context_valid(const task_group_context *tgc) {
        suppress_unused_warning(tgc);
#if TBB_USE_ASSERT
        __TBB_ASSERT(tgc, NULL);
        uintptr_t ctx = tgc->my_version_and_traits;
        __TBB_ASSERT(is_alive(ctx), "referenced task_group_context was destroyed");
        static const char *msg = "task_group_context is invalid";
        __TBB_ASSERT(!(ctx&~(3|(7<<task_group_context::traits_offset))), msg); // the value fits known values of versions and traits
        __TBB_ASSERT(tgc->my_kind < task_group_context::dying, msg);
        __TBB_ASSERT(tgc->my_cancellation_requested == 0 || tgc->my_cancellation_requested == 1, msg);
        if(tgc->my_kind == task_group_context::isolated || tgc->my_kind == task_group_context::bound) {
            __TBB_ASSERT(tgc->my_owner, msg);
            __TBB_ASSERT(tgc->my_node.my_next && tgc->my_node.my_prev, msg);
        }
#if __TBB_TASK_PRIORITY
        assert_priority_valid(tgc->my_priority);
#endif
        if(tgc->my_parent)
#if TBB_USE_ASSERT > 1
            assert_context_valid(tgc->my_parent);
#else
            __TBB_ASSERT(is_alive(tgc->my_parent->my_version_and_traits), msg);
#endif
#endif
    }
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if _WIN32||_WIN64
private:
    //! Handle of the owning master thread registered with RML as an execution resource.
    ::rml::server::execution_resource_t master_exec_resource;
public:
#endif /* _WIN32||_WIN64 */

#if __TBB_TASK_GROUP_CONTEXT
    //! Flag indicating that a nonlocal update of the context list is in progress.
    tbb::atomic<uintptr_t> my_nonlocal_ctx_list_update;
#endif /* __TBB_TASK_GROUP_CONTEXT */

#if __TBB_SURVIVE_THREAD_SWITCH
    __cilk_tbb_unwatch_thunk my_cilk_unwatch_thunk;
#if TBB_USE_ASSERT
    enum cilk_state_t {
        cs_none=0xF000, // Start at nonzero value so that we can detect use of zeroed memory.
        cs_running,
        cs_limbo,
        cs_freed
    };
    cilk_state_t my_cilk_state;
#endif /* TBB_USE_ASSERT */
#endif /* __TBB_SURVIVE_THREAD_SWITCH */

#if __TBB_STATISTICS
    mutable statistics_counters my_counters;
#endif /* __TBB_STATISTICS */

}; // class generic_scheduler

} // namespace internal
} // namespace tbb

#include "arena.h"
#include "governor.h"

namespace tbb {
namespace internal {

inline bool generic_scheduler::is_task_pool_published () const {
    __TBB_ASSERT(my_arena_slot, 0);
    return my_arena_slot->task_pool != EmptyTaskPool;
}

inline bool generic_scheduler::is_local_task_pool_quiescent () const {
    __TBB_ASSERT(my_arena_slot, 0);
    task** tp = my_arena_slot->task_pool;
    return tp == EmptyTaskPool || tp == LockedTaskPool;
}
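
// Illustrative sketch of the locking protocol implied by the EmptyTaskPool/LockedTaskPool
// sentinels (an assumption based on acquire_task_pool()/lock_task_pool() in scheduler.cpp,
// not a verbatim copy): arena_slot::task_pool doubles as a lock word, and a thief takes the
// lock by atomically swapping the real pointer out and leaving LockedTaskPool in its place:
//
//     task** victim_pool = victim_slot.task_pool;
//     if( victim_pool != EmptyTaskPool && victim_pool != LockedTaskPool &&
//         as_atomic( victim_slot.task_pool ).compare_and_swap( LockedTaskPool, victim_pool ) == victim_pool ) {
//         // ... inspect or steal from victim_pool ...
//         __TBB_store_with_release( victim_slot.task_pool, victim_pool );   // unlock, republishing the pool
//     }
//
// is_local_task_pool_quiescent() above relies on the same convention: a pool that is either
// unpublished (EmptyTaskPool) or locked (LockedTaskPool) cannot be touched by thieves.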

inline bool generic_scheduler::is_quiescent_local_task_pool_empty () const {
    __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" );
    return __TBB_load_relaxed( my_arena_slot->head ) == __TBB_load_relaxed( my_arena_slot->tail );
}

inline bool generic_scheduler::is_quiescent_local_task_pool_reset () const {
    __TBB_ASSERT( is_local_task_pool_quiescent(), "Task pool is not quiescent" );
    return __TBB_load_relaxed( my_arena_slot->head ) == 0 && __TBB_load_relaxed( my_arena_slot->tail ) == 0;
}

inline bool generic_scheduler::outermost_level () const {
    return my_properties.outermost;
}

inline bool generic_scheduler::master_outermost_level () const {
    return !is_worker() && outermost_level();
}

inline bool generic_scheduler::worker_outermost_level () const {
    return is_worker() && outermost_level();
}

#if __TBB_TASK_GROUP_CONTEXT
inline task_group_context* generic_scheduler::default_context () {
    return my_dummy_task->prefix().context;
}
#endif /* __TBB_TASK_GROUP_CONTEXT */

inline void generic_scheduler::attach_mailbox( affinity_id id ) {
    __TBB_ASSERT(id>0,NULL);
    my_inbox.attach( my_arena->mailbox(id) );
    my_affinity_id = id;
}

inline bool generic_scheduler::is_worker() const {
    return my_properties.type == scheduler_properties::worker;
}

inline unsigned generic_scheduler::max_threads_in_arena() {
    __TBB_ASSERT(my_arena, NULL);
    return my_arena->my_num_slots;
}

inline void generic_scheduler::deallocate_task( task& t ) {
#if TBB_USE_ASSERT
    task_prefix& p = t.prefix();
    p.state = 0xFF;
    p.extra_state = 0xFF;
    poison_pointer(p.next);
#endif /* TBB_USE_ASSERT */
    NFS_Free((char*)&t - task_prefix_reservation_size);
#if __TBB_COUNT_TASK_NODES
    --my_task_node_count;
#endif /* __TBB_COUNT_TASK_NODES */
}
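
// Sketch of the memory layout that deallocate_task() undoes (an assumption consistent with
// task_prefix_reservation_size and the allocate_task() declaration above, not a copy of
// scheduler.cpp): every task object is preceded by its task_prefix, so allocation reserves
// task_prefix_reservation_size extra bytes and hands out a pointer just past the prefix:
//
//     // hypothetical helper, names are illustrative only
//     task* allocate_task_sketch( size_t number_of_bytes ) {
//         char* mem = (char*)NFS_Allocate( 1, task_prefix_reservation_size + number_of_bytes, NULL );
//         return (task*)(mem + task_prefix_reservation_size);
//     }
//
// which is why deallocate_task() passes (char*)&t - task_prefix_reservation_size to NFS_Free().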

#if __TBB_COUNT_TASK_NODES
inline intptr_t generic_scheduler::get_task_node_count( bool count_arena_workers ) {
    return my_task_node_count + (count_arena_workers? my_arena->workers_task_node_count(): 0);
}
#endif /* __TBB_COUNT_TASK_NODES */

inline void generic_scheduler::reset_task_pool_and_leave () {
    __TBB_ASSERT( my_arena_slot->task_pool == LockedTaskPool, "Task pool must be locked when resetting task pool" );
    __TBB_store_relaxed( my_arena_slot->tail, 0 );
    __TBB_store_relaxed( my_arena_slot->head, 0 );
    leave_task_pool();
}

//TODO: move to arena_slot
inline void generic_scheduler::commit_spawned_tasks( size_t new_tail ) {
    __TBB_ASSERT ( new_tail <= my_arena_slot->my_task_pool_size, "task deque end was overwritten" );
    // emit "task was released" signal
    ITT_NOTIFY(sync_releasing, (void*)((uintptr_t)my_arena_slot+sizeof(uintptr_t)));
    // Release fence is necessary to make sure that previously stored task pointers
    // are visible to thieves.
    __TBB_store_with_release( my_arena_slot->tail, new_tail );
}

inline void generic_scheduler::commit_relocated_tasks ( size_t new_tail ) {
    __TBB_ASSERT( is_local_task_pool_quiescent(),
                  "Task pool must be locked when calling commit_relocated_tasks()" );
    __TBB_store_relaxed( my_arena_slot->head, 0 );
    // Tail is updated last to minimize probability of a thread making arena
    // snapshot being misguided into thinking that this task pool is empty.
    __TBB_store_release( my_arena_slot->tail, new_tail );
    release_task_pool();
}
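
// The release/acquire pairing that commit_spawned_tasks() depends on, restated as a minimal
// stand-alone sketch (using std::atomic as a conceptual stand-in for __TBB_store_with_release
// and the acquire load a thief performs; this is not TBB code):
//
//     #include <atomic>
//
//     task*               deque[64];              // the owner's task pool
//     std::atomic<size_t> tail{0};
//
//     void producer_spawn( task* t ) {            // owner thread
//         deque[tail.load(std::memory_order_relaxed)] = t;    // plain store of the task pointer
//         tail.fetch_add( 1, std::memory_order_release );     // publish: the store above is now visible
//     }
//
//     task* thief_peek() {                        // stealing thread
//         size_t n = tail.load( std::memory_order_acquire );  // pairs with the release above
//         return n ? deque[n - 1] : NULL;                     // safe: the pointer store is visible
//     }
//
// Without the release on tail, a thief could observe the new tail value yet still read a stale
// (garbage) task pointer from the deque slot.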

template<free_task_hint hint>
void generic_scheduler::free_task( task& t ) {
#if __TBB_HOARD_NONLOCAL_TASKS
    static const int h = hint&(~local_task);
#else
    static const free_task_hint h = hint;
#endif
    GATHER_STATISTIC(--my_counters.active_tasks);
    task_prefix& p = t.prefix();
    // Verify that optimization hints are correct.
    __TBB_ASSERT( h!=small_local_task || p.origin==this, NULL );
    __TBB_ASSERT( !(h&small_task) || p.origin, NULL );
    __TBB_ASSERT( !(h&local_task) || (!p.origin || uintptr_t(p.origin) > uintptr_t(4096)), "local_task means allocated");
    poison_value(p.depth);
    poison_value(p.ref_count);
    poison_pointer(p.owner);
    __TBB_ASSERT( 1L<<t.state() & (1L<<task::executing|1L<<task::allocated), NULL );
    p.state = task::freed;
    if( h==small_local_task || p.origin==this ) {
        GATHER_STATISTIC(++my_counters.free_list_length);
        p.next = my_free_list;
        my_free_list = &t;
    } else if( !(h&local_task) && p.origin && uintptr_t(p.origin) < uintptr_t(4096) ) {
        // a special value reserved for future use, do nothing since
        // origin is not pointing to a scheduler instance
    } else if( !(h&local_task) && p.origin ) {
        GATHER_STATISTIC(++my_counters.free_list_length);
#if __TBB_HOARD_NONLOCAL_TASKS
        if( !(h&no_cache) ) {
            p.next = my_nonlocal_free_list;
            my_nonlocal_free_list = &t;
        } else
#endif
            free_nonlocal_small_task(t);
    } else {
        GATHER_STATISTIC(--my_counters.big_tasks);
        deallocate_task(t);
    }
}
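
// Usage sketch for the free_task_hint optimization (hypothetical call site, not taken from
// scheduler.cpp): the template argument lets a caller that already knows where a task came from
// compile away the origin and size tests above. The hint values referenced in this file include
// local_task, small_task, their combination small_local_task, and no_cache.
//
//     // the task is known to be small and to have been allocated by this scheduler:
//     free_task<small_local_task>( t );
//
// A hint must be conservative: claiming small_local_task for a task that was actually allocated
// by a different scheduler trips the __TBB_ASSERT checks at the top of free_task().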

#if __TBB_TASK_PRIORITY
inline intptr_t generic_scheduler::effective_reference_priority () const {
    // Workers on the outermost dispatch level (i.e. with empty stack) use market's
    // priority as a reference point (to speedup discovering process level priority
    // changes). But when there are enough workers to service (even if only partially)
    // a lower priority arena, they should use arena's priority as a reference, lest
    // be trapped in a futile spinning (because market's priority would prohibit
    // executing ANY tasks in this arena).
    return !worker_outermost_level() ||
        ( my_arena->my_num_workers_allotted < my_arena->num_workers_active()
#if __TBB_ENQUEUE_ENFORCED_CONCURRENCY
          && my_arena->my_concurrency_mode!=arena_base::cm_enforced_global
#endif
        ) ? *my_ref_top_priority : my_arena->my_top_priority;
}

inline void generic_scheduler::offload_task ( task& t, intptr_t /*priority*/ ) {
    GATHER_STATISTIC( ++my_counters.prio_tasks_offloaded );
    __TBB_ASSERT( !is_proxy(t), "The proxy task cannot be offloaded" );
    __TBB_ASSERT( my_offloaded_task_list_tail_link && !*my_offloaded_task_list_tail_link, NULL );
#if TBB_USE_ASSERT
    t.prefix().state = task::ready;
#endif /* TBB_USE_ASSERT */
    t.prefix().next_offloaded = my_offloaded_tasks;
    my_offloaded_tasks = &t;
}
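
// Sketch of how the offloaded-task list built by offload_task() above could be drained
// (an assumption about reload_tasks() in scheduler.cpp, shown only to illustrate the
// intrusive, LIFO use of task_prefix::next_offloaded; the helper name is hypothetical):
//
//     task* drain_offloaded_sketch( generic_scheduler& s ) {
//         task* list = s.my_offloaded_tasks;
//         s.my_offloaded_tasks = NULL;   // presumably my_offloaded_task_list_tail_link is reset as well
//         return list;                   // the caller walks prefix().next_offloaded and re-spawns each task
//     }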
#endif /* __TBB_TASK_PRIORITY */

#if __TBB_PREVIEW_CRITICAL_TASKS
class critical_task_count_guard : internal::no_copy {
public:
    critical_task_count_guard(scheduler_properties& properties, task& t)
        : my_properties(properties),
          my_original_critical_task_state(properties.has_taken_critical_task) {
        my_properties.has_taken_critical_task |= internal::is_critical(t);
    }
    ~critical_task_count_guard() {
        my_properties.has_taken_critical_task = my_original_critical_task_state;
    }
private:
    scheduler_properties& my_properties;
    bool my_original_critical_task_state;
};
#endif /* __TBB_PREVIEW_CRITICAL_TASKS */

#if __TBB_FP_CONTEXT || __TBB_TASK_GROUP_CONTEXT

template <bool report_tasks>
class context_guard_helper {
#if __TBB_TASK_GROUP_CONTEXT
    const task_group_context *curr_ctx;
#endif
#if __TBB_FP_CONTEXT
    cpu_ctl_env guard_cpu_ctl_env;
    cpu_ctl_env curr_cpu_ctl_env;
#endif
public:
    context_guard_helper()
#if __TBB_TASK_GROUP_CONTEXT
        : curr_ctx(NULL)
#endif
    {
#if __TBB_FP_CONTEXT
        guard_cpu_ctl_env.get_env();
        curr_cpu_ctl_env = guard_cpu_ctl_env;
#endif
    }
    ~context_guard_helper() {
#if __TBB_FP_CONTEXT
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env )
            guard_cpu_ctl_env.set_env();
#endif
#if __TBB_TASK_GROUP_CONTEXT
        if (report_tasks && curr_ctx)
            ITT_TASK_END;
#endif
    }
    void set_ctx( const task_group_context *ctx ) {
        generic_scheduler::assert_context_valid(ctx);
#if __TBB_FP_CONTEXT
        const cpu_ctl_env &ctl = *punned_cast<cpu_ctl_env*>(&ctx->my_cpu_ctl_env);
#endif
#if __TBB_TASK_GROUP_CONTEXT
        if(ctx != curr_ctx) {
#endif
#if __TBB_FP_CONTEXT
            if ( ctl != curr_cpu_ctl_env ) {
                curr_cpu_ctl_env = ctl;
                curr_cpu_ctl_env.set_env();
            }
#endif
#if __TBB_TASK_GROUP_CONTEXT
            // if task group context was active, report end of current execution frame.
            if (report_tasks) {
                if (curr_ctx)
                    ITT_TASK_END;
                // reporting begin of new task group context execution frame.
                // using address of task group context object to group tasks (parent).
                // id of task execution frame is NULL and reserved for future use.
                ITT_TASK_BEGIN(ctx,ctx->my_name,NULL);
                curr_ctx = ctx;
            }
        }
#endif
    }
    void restore_default() {
#if __TBB_FP_CONTEXT
        if ( curr_cpu_ctl_env != guard_cpu_ctl_env ) {
            guard_cpu_ctl_env.set_env();
            curr_cpu_ctl_env = guard_cpu_ctl_env;
        }
#endif
    }
};
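
// Usage sketch (hypothetical dispatch-loop fragment, not copied from custom_scheduler.h): the
// guard is created once per dispatch loop, switched with set_ctx() before each task executes so
// that FPU settings follow the task's context, and restored automatically on scope exit.
//
//     context_guard_helper</*report_tasks=*/false> context_guard;
//     while ( task* t = next_task_somehow() ) {       // next_task_somehow() is a placeholder
//     #if __TBB_TASK_GROUP_CONTEXT
//         context_guard.set_ctx( t->prefix().context );
//     #endif
//         t->execute();
//     }
//     // ~context_guard_helper() restores the captured FP environment and, when enabled,
//     // closes the current ITT task frame.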
#else
template <bool T>
struct context_guard_helper {
    void set_ctx( __TBB_CONTEXT_ARG1(task_group_context *) ) {}
    void restore_default() {}
};
#endif /* __TBB_FP_CONTEXT */

} // namespace internal
} // namespace tbb

#endif /* _TBB_scheduler_H */