#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"

namespace tbb {

class task;
class task_list;

#if __TBB_EXCEPTIONS
class task_group_context;
#endif

namespace internal {

    //! Interface to the task scheduler implementation; for internal use only.
    class scheduler: no_copy {
    public:
        //! Schedule the task(s) starting at first for execution.
        virtual void spawn( task& first, task*& next ) = 0;

        //! Wait until the children of parent have completed, possibly executing child in the process.
        virtual void wait_for_all( task& parent, task* child ) = 0;

        //! Spawn the root task(s) starting at first and wait for them to complete.
        virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

        //! Pure virtual destructor; concrete schedulers are defined by the library.
        virtual ~scheduler() = 0;
    };

    //! A reference count.
    /** A signed integral type, so that underflow can be detected. */
    typedef intptr reference_count;

    //! An id as used for specifying affinity.
    typedef unsigned short affinity_id;

#if __TBB_EXCEPTIONS
    //! Node of the intrusive doubly linked list used to register task_group_context objects.
    struct context_list_node_t {
        context_list_node_t *my_prev,
                            *my_next;
    };

    //! Helper that allocates a root task bound to a user-supplied task_group_context.
    class allocate_root_with_context_proxy: no_assign {
        task_group_context& my_context;
    public:
        allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };
#endif

    //! Helper for allocating root tasks.
    class allocate_root_proxy: no_assign {
    public:
        static task& __TBB_EXPORTED_FUNC allocate( size_t size );
        static void __TBB_EXPORTED_FUNC free( task& );
    };

    //! Helper for allocating continuation tasks.
    class allocate_continuation_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Helper for allocating child tasks.
    class allocate_child_proxy: no_assign {
    public:
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    //! Helper for allocating an additional child of an existing task.
    class allocate_additional_child_of_proxy: no_assign {
        task& self;
        task& parent;
    public:
        allocate_additional_child_of_proxy( task& self_, task& parent_ ) : self(self_), parent(parent_) {}
        task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
        void __TBB_EXPORTED_METHOD free( task& ) const;
    };

    class task_group_base;

    //! Memory prefix to a task object.
    /** This class is internal to the library.
        The prefix is allocated immediately before the corresponding task object,
        which allows the two to be converted to each other with pointer arithmetic. */
    class task_prefix {
    private:
        friend class tbb::task;
        friend class tbb::task_list;
        friend class internal::scheduler;
        friend class internal::allocate_root_proxy;
        friend class internal::allocate_child_proxy;
        friend class internal::allocate_continuation_proxy;
        friend class internal::allocate_additional_child_of_proxy;
        friend class internal::task_group_base;

#if __TBB_EXCEPTIONS
        //! Cancellation/exception context with which this task is associated.
        task_group_context *context;
#endif

        //! Scheduler that allocated the task.
        scheduler* origin;

        //! Scheduler that currently owns the task.
        scheduler* owner;

        //! The task whose reference count includes this task (its successor).
        tbb::task* parent;

        //! Reference count used for synchronization.
        /** Counts outstanding children, plus one if the owner is explicitly waiting. */
        reference_count ref_count;

        //! Scheduling depth.
        int depth;

        //! A task::state_type value, stored as one byte for compactness.
        unsigned char state;

        //! Miscellaneous flags reserved for the library; the 0x80 bit marks a stolen task.
        unsigned char extra_state;

        affinity_id affinity;

        //! Link used for intrusive lists of tasks (e.g. task_list and spawn lists).
        tbb::task* next;

        //! The task corresponding to this task_prefix.
        tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
    };
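
    // Layout note (informal sketch derived from the code above, not a separate definition):
    // the allocate_* proxies hand out memory in which a task_prefix immediately precedes the
    // tbb::task object the user sees.  Roughly:
    //
    //      +----------------------+  <- start of the allocated block
    //      |      task_prefix     |
    //      +----------------------+  <- "this" of the tbb::task object returned to the user
    //      |  user's task object  |
    //      +----------------------+
    //
    // which is why task_prefix::task() uses (this+1) and task::prefix() indexes with [-1].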

} // namespace internal

#if __TBB_EXCEPTIONS

#if TBB_USE_CAPTURED_EXCEPTION
class tbb_exception;
#else
namespace internal {
    class tbb_exception_ptr;
}
#endif

//! Used to form groups of tasks for the purposes of cancellation and exception handling.
/** A context carries cancellation requests and exceptions across a group of tasks.
    By default a context is bound to the context of its parent task group, so that
    cancelling the parent group also cancels this one; an isolated context breaks
    that chain. */
class task_group_context : internal::no_copy
{
private:
#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        no_cancellation = 0x0002ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif
    };

private:
    union {
        //! Flavor of this context: bound or isolated.
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    //! Pointer to the context of the parent cancellation group; NULL for isolated contexts.
    task_group_context *my_parent;

    //! Node in the thread-specific list of contexts.
    internal::context_list_node_t my_node;

    //! Leading padding protecting accesses to frequently used members from false sharing.
    char _leading_padding[internal::NFS_MaxLineSize -
                          2 * sizeof(uintptr_t) - sizeof(void*) - sizeof(internal::context_list_node_t)];

    //! Nonzero if cancellation has been requested for this task group.
    uintptr_t my_cancellation_requested;

    //! Version number for run-time checks combined with the bitmask of this context's traits.
    uintptr_t my_version_and_traits;

    //! Pointer to the container storing the exception being propagated across this task group.
    exception_container_type *my_exception;

    //! Scheduler that registered this context in its thread-specific list.
    void *my_owner;

    //! Trailing padding protecting accesses to frequently used members from false sharing.
    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

public:
    //! Default and binding constructor.
    /** By default a bound context is created; pass isolated to detach the new
        context from the context of the spawning task group. */
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    //! Forcibly reinitializes the context after the task tree it was associated with has completed.
    void __TBB_EXPORTED_METHOD reset ();

    //! Initiates cancellation of all tasks in this group and its subordinate groups.
    /** Returns false if cancellation had already been requested, true otherwise. */
    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    //! Returns true if cancellation has been requested for this group.
    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    //! Records the pending exception and cancels the task group.
    void __TBB_EXPORTED_METHOD register_pending_exception ();

protected:
    //! Out-of-line part of the constructor.
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);

    //! Propagates any pending cancellation request from ancestor contexts to this one.
    void propagate_cancellation_from_ancestors ();

    //! For debugging purposes only.
    bool is_alive () {
#if TBB_USE_DEBUG
        return my_version_and_traits != 0xDeadBeef;
#else
        return true;
#endif
    }
};

#endif /* __TBB_EXCEPTIONS */
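
// Usage sketch (illustrative only; MyRootTask is hypothetical user code, not part of this
// header): a root task bound to an explicit task_group_context, so that the whole task
// tree can be cancelled from another thread.
//
//     tbb::task_group_context ctx;        // bound context with default traits
//     MyRootTask& r = *new( tbb::task::allocate_root(ctx) ) MyRootTask();
//     tbb::task::spawn_root_and_wait(r);  // runs the tree, blocks until it finishes
//
//     // Meanwhile, any other thread may request cancellation of the whole group:
//     //     ctx.cancel_group_execution();
//     // Tasks in the tree can poll tbb::task::self().is_cancelled() and return early.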

//! Base class for user-defined tasks.
class task: internal::no_copy {
    //! Set reference count; version instrumented for threading tools and assertions.
    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    //! Decrement reference count and return its new value; instrumented version.
    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    //! Default constructor, for use by derived classes only.
    task() {prefix().extra_state=1;}

public:
    //! Destructor.
    virtual ~task() {}

    //! Should be overridden by derived classes.
    /** Returns the next task to execute, or NULL to let the scheduler choose. */
    virtual task* execute() = 0;

    //! Enumeration of task states that the scheduler considers.
    enum state_type {
        //! task is running, and will be destroyed after method execute() completes
        executing,
        //! task is to be rescheduled after method execute() completes
        reexecute,
        //! task is in the ready pool, or is about to be put there, or was just taken off
        ready,
        //! task object is freshly allocated or recycled
        allocated,
        //! task object is on the free list, or is about to be put there, or was just taken off
        freed,
        //! task is to be recycled as a continuation
        recycle
    };

    //------------------------------------------------------------------------
    // Allocating tasks
    //------------------------------------------------------------------------

    //! Returns proxy for overloaded new that allocates a root task.
    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_EXCEPTIONS
    //! Returns proxy for overloaded new that allocates a root task associated with the user-supplied context.
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif

    //! Returns proxy for overloaded new that allocates a continuation task of *this.
    /** The continuation inherits the parent of *this, and the parent of *this becomes NULL. */
    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    //! Returns proxy for overloaded new that allocates a child task of *this.
    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    //! Like allocate_child, except that the new task's parent becomes t, not *this.
    /** The caller must guarantee that t's reference count stays nonzero while the child is constructed and spawned. */
    internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return internal::allocate_additional_child_of_proxy(*this,t);
    }

    //! Destroy a task.
    /** Usually unnecessary, because a task is implicitly deleted after its
        execute() method runs.  Needed for tasks that never execute, such as
        a dummy root used only for waiting on children. */
    void __TBB_EXPORTED_METHOD destroy( task& victim );

    //------------------------------------------------------------------------
    // Recycling of tasks
    //------------------------------------------------------------------------

    //! Change this to be a continuation of its former self.
    /** The caller must guarantee that the task's reference count does not become
        zero until after execute() returns, typically by having execute() return a
        pointer to a child; otherwise use recycle_as_safe_continuation instead. */
    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    //! Safe variant of recycle_as_continuation.
    /** Requires an additional increment of the reference count to account for *this. */
    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

    //! Change this to be a child of new_parent.
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_EXCEPTIONS
        p.context = new_parent.prefix().context;
#endif
    }

    //! Schedule this task for reexecution after the current execute() returns.
    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }
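
    // Usage sketch (illustrative only; FibTask and FibContinuation are hypothetical user
    // code, FibTask being the same recursive task as in the blocking-style sketch further
    // below): continuation-passing style.  Instead of blocking, execute() allocates a
    // continuation that receives the children's results, and returns one child directly
    // so the scheduler runs it without re-queuing.
    //
    //     struct FibContinuation: public tbb::task {
    //         long* const sum;
    //         long x, y;
    //         FibContinuation( long* sum_ ) : sum(sum_) {}
    //         tbb::task* execute() { *sum = x + y; return NULL; }
    //     };
    //
    //     // inside FibTask::execute(), for n >= 2:
    //     FibContinuation& c = *new( allocate_continuation() ) FibContinuation(sum);
    //     FibTask& a = *new( c.allocate_child() ) FibTask(n-1, &c.x);
    //     FibTask& b = *new( c.allocate_child() ) FibTask(n-2, &c.y);
    //     c.set_ref_count(2);    // two children, no explicit wait on c
    //     spawn(b);
    //     return &a;
    //
    // Alternatively, recycle_as_continuation() lets *this itself play the role of c,
    // avoiding the extra allocation.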

    //! Obsolete depth interface, retained for source compatibility; these calls have no effect.
    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}

    //------------------------------------------------------------------------
    // Spawning and synchronization
    //------------------------------------------------------------------------

    //! Set reference count.
    /** The count must include the number of children about to be spawned,
        plus one if this task will explicitly wait for them. */
    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif
    }

    //! Atomically increment reference count (acquire semantics).
    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    //! Atomically decrement reference count and return its new value (release semantics).
    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif
    }

    //! Schedule child for execution when a worker becomes available.
    void spawn( task& child ) {
        prefix().owner->spawn( child, child.prefix().next );
    }

    //! Spawn multiple tasks and clear the list.
    void spawn( task_list& list );

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    //! Similar to spawn followed by wait_for_all, but more efficient.
    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    //! Spawn a task allocated by allocate_root, wait for it to complete, and deallocate it.
    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    //! Spawn the root tasks on list and wait for all of them to finish.
    static void spawn_root_and_wait( task_list& root_list );

    //! Wait for reference count to become one, and set reference count to zero.
    /** Works on other tasks while waiting. */
    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }
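
    // Usage sketch (illustrative only; FibTask and parallel_fib are hypothetical user
    // code): the blocking style, combining allocate_root/allocate_child, set_ref_count,
    // spawn and spawn_and_wait_for_all.
    //
    //     class FibTask: public tbb::task {
    //     public:
    //         const long n;
    //         long* const sum;
    //         FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
    //         tbb::task* execute() {
    //             if( n<2 ) {
    //                 *sum = n;
    //             } else {
    //                 long x, y;
    //                 FibTask& a = *new( allocate_child() ) FibTask(n-1,&x);
    //                 FibTask& b = *new( allocate_child() ) FibTask(n-2,&y);
    //                 set_ref_count(3);          // 2 children + 1 for the explicit wait
    //                 spawn(b);
    //                 spawn_and_wait_for_all(a);
    //                 *sum = x+y;
    //             }
    //             return NULL;
    //         }
    //     };
    //
    //     long parallel_fib( long n ) {
    //         long sum;
    //         FibTask& a = *new( tbb::task::allocate_root() ) FibTask(n,&sum);
    //         tbb::task::spawn_root_and_wait(a);
    //         return sum;
    //     }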

    //! The innermost task being executed or destroyed by the current thread at the moment.
    static task& __TBB_EXPORTED_FUNC self();

    //! The task on whose behalf this task is working, or NULL if this is a root.
    task* parent() const {return prefix().parent;}

#if __TBB_EXCEPTIONS
    //! Shared context that is used to communicate cancellation requests.
    task_group_context* context() {return prefix().context;}
#endif

    //! True if this task was stolen from the task pool of another thread.
    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    //------------------------------------------------------------------------
    // Debugging
    //------------------------------------------------------------------------

    //! Current execution state.
    state_type state() const {return state_type(prefix().state);}

    //! The internal reference count.
    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count = prefix().ref_count;
        __TBB_ASSERT( ref_count==int(ref_count), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    //! True if this task is owned by the calling thread; false otherwise.
    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    //------------------------------------------------------------------------
    // Affinity
    //------------------------------------------------------------------------

    //! An id as used for specifying affinity.
    /** A value of 0 means no affinity. */
    typedef internal::affinity_id affinity_id;

    //! Set affinity for this task.
    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    //! Current affinity of this task.
    affinity_id affinity() const {return prefix().affinity;}

    //! Invoked by the scheduler to notify the task that it ran on an unexpected thread.
    /** Invoked before execute() runs when the task is stolen, or has affinity but
        will be executed on another thread.  The default action does nothing. */
    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_EXCEPTIONS
    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** Returns false if cancellation had already been requested, true otherwise. */
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    //! Returns true if this task's group has received a cancellation request.
    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif

private:
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_EXCEPTIONS
    friend class internal::allocate_root_with_context_proxy;
#endif
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    friend class internal::task_group_base;

    //! Get reference to the corresponding task_prefix.
    /** The prefix is allocated immediately before the task object. */
    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
};

//! A task that does nothing.  Useful for synchronization.
class empty_task: public task {
    task* execute() {
        return NULL;
    }
};
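
// Usage sketch (illustrative only; MyTask is hypothetical user code): empty_task as a
// dummy root when a thread wants to spawn children and wait for them without having a
// task of its own.  Because the dummy root is never spawned, it never executes and
// must be destroyed explicitly.
//
//     tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
//     const int k = 4;                               // number of children
//     root.set_ref_count(k+1);                       // k children + 1 for wait_for_all
//     for( int i=0; i<k; ++i )
//         root.spawn( *new( root.allocate_child() ) MyTask(i) );
//     root.wait_for_all();                           // returns once all children are done
//     root.destroy(root);                            // deallocate the dummy root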

//! A list of tasks; used for spawning several tasks at once.
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
public:
    //! Construct an empty list.
    task_list() : first(NULL), next_ptr(&first) {}

    //! Destroys the list, but does not destroy the task objects it refers to.
    ~task_list() {}

    //! True if the list is empty; false otherwise.
    bool empty() const {return !first;}

    //! Push a task onto the back of the list.
    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }

    //! Pop the front task off the list.
    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    //! Clear the list.
    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};
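
// Usage sketch (illustrative only; MyTask is hypothetical user code): collecting several
// root tasks in a task_list and launching them with a single call.
//
//     tbb::task_list list;
//     for( int i=0; i<4; ++i )
//         list.push_back( *new( tbb::task::allocate_root() ) MyTask(i) );
//     tbb::task::spawn_root_and_wait(list);   // runs them all, then leaves the list empty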

inline void task::spawn( task_list& list ) {
    if( task* t = list.first ) {
        prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_EXCEPTIONS
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
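
// How the overloads above are used (informal note; MyTask is hypothetical user code):
// an expression such as
//     new( some_task.allocate_child() ) MyTask(args);
// selects the matching placement operator new, which forwards to the proxy's allocate()
// to obtain raw task storage, and then constructs MyTask in that storage.  The matching
// placement operator delete is called by the compiler only if the constructor throws,
// so the storage is returned to the library instead of leaking.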

#endif /* __TBB_task_H */