/*
    Copyright (c) 2005-2021 Intel Corporation

    Licensed under the Apache License, Version 2.0 (the "License");
    you may not use this file except in compliance with the License.
    You may obtain a copy of the License at

        http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing, software
    distributed under the License is distributed on an "AS IS" BASIS,
    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    See the License for the specific language governing permissions and
    limitations under the License.
*/

#ifndef __TBB_task_group_H
#define __TBB_task_group_H

#include "detail/_config.h"
#include "detail/_namespace_injection.h"
#include "detail/_assert.h"
#include "detail/_utils.h"
#include "detail/_template_helpers.h"
#include "detail/_exception.h"
#include "detail/_task.h"
#include "detail/_small_object_pool.h"
#include "detail/_intrusive_list_node.h"

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
#include "detail/_task_handle.h"
#endif

#include "profiling.h"

#include <type_traits>

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress warning: structure was padded due to alignment specifier
    #pragma warning(push)
    #pragma warning(disable:4324)
#endif

namespace tbb {
namespace detail {

namespace d1 {
class delegate_base;
class task_arena_base;
class task_group_context;
class task_group_base;
}

namespace r1 {
// Forward declarations
class tbb_exception_ptr;
class market;
class thread_data;
class task_dispatcher;
template <bool>
class context_guard_helper;
struct task_arena_impl;
class context_list;

TBB_EXPORT void __TBB_EXPORTED_FUNC execute(d1::task_arena_base&, d1::delegate_base&);
TBB_EXPORT void __TBB_EXPORTED_FUNC isolate_within_arena(d1::delegate_base&, std::intptr_t);

TBB_EXPORT void __TBB_EXPORTED_FUNC initialize(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC destroy(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC reset(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC cancel_group_execution(d1::task_group_context&);
TBB_EXPORT bool __TBB_EXPORTED_FUNC is_group_execution_cancelled(d1::task_group_context&);
TBB_EXPORT void __TBB_EXPORTED_FUNC capture_fp_settings(d1::task_group_context&);

struct task_group_context_impl;
}

namespace d2 {

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
namespace {
template<typename F>
d1::task* task_ptr_or_nullptr(F&& f);
}

template<typename F>
class function_task : public task_handle_task {
    // TODO: apply empty base optimization here
    const F m_func;

private:
    d1::task* execute(d1::execution_data& ed) override {
        __TBB_ASSERT(ed.context == &this->ctx(), "The task group context should be used for all tasks");
        task* res = task_ptr_or_nullptr(m_func);
        finalize(&ed);
        return res;
    }
    d1::task* cancel(d1::execution_data& ed) override {
        finalize(&ed);
        return nullptr;
    }
public:
    template<typename FF>
    function_task(FF&& f, d1::wait_context& wo, d1::task_group_context& ctx, d1::small_object_allocator& alloc)
        : task_handle_task{wo, ctx, alloc},
          m_func(std::forward<FF>(f)) {}
};

namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::false_type, F&& f){
        task_handle th = std::forward<F>(f)();
        return task_handle_accessor::release(th);
    }

    template<typename F>
    d1::task* task_ptr_or_nullptr_impl(std::true_type, F&& f){
        std::forward<F>(f)();
        return nullptr;
    }

    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        using is_void_t = std::is_void<
            decltype(std::forward<F>(f)())
        >;

        return task_ptr_or_nullptr_impl(is_void_t{}, std::forward<F>(f));
    }
}
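
// The helpers above implement tag dispatch on the functor's return type: when F
// returns a task_handle, the wrapped task is released and returned so the caller
// can bypass-schedule it; when F returns void, nullptr is returned instead. A
// minimal illustrative sketch (`tg` is a hypothetical tbb::task_group; the
// functors are not part of this header):
//
//     auto returns_void   = []    { /* plain work */ };
//     auto returns_handle = [&tg] { return tg.defer([] { /* work */ }); };
//     // task_ptr_or_nullptr(returns_void)   -> true_type overload  -> nullptr
//     // task_ptr_or_nullptr(returns_handle) -> false_type overload -> released task
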
#else
namespace {
    template<typename F>
    d1::task* task_ptr_or_nullptr(F&& f){
        std::forward<F>(f)();
        return nullptr;
    }
} // namespace
#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
} // namespace d2

namespace d1 {

// This structure is left here for backward compatibility checks
struct context_list_node {
    std::atomic<context_list_node*> prev{};
    std::atomic<context_list_node*> next{};
};

//! Used to form groups of tasks
/** @ingroup task_scheduling
    The context services explicit cancellation requests from user code, as well as
    unhandled exceptions intercepted during task execution. Intercepting an exception
    results in an internal cancellation request, which is processed in exactly the
    same way as an external one.

    The context is associated with one or more root tasks and defines the cancellation
    group that includes all the descendants of the corresponding root task(s). Association
    is established when a context object is passed as an argument to the task::allocate_root()
    method. See task_group_context::task_group_context for more details.

    The context can be bound to another one, and other contexts can be bound to it,
    forming a tree-like structure: parent -> this -> children. Arrows here designate
    the cancellation propagation direction. If a task in a cancellation group is cancelled,
    all the other tasks in this group and in the groups bound to it (as children) get
    cancelled too. (A usage sketch follows the class definition below.)
**/
class task_group_context : no_copy {
public:
    enum traits_type {
        fp_settings = 1 << 1,
        concurrent_wait = 1 << 2,
        default_traits = 0
    };
    enum kind_type {
        isolated,
        bound
    };
private:
    //! Space for platform-specific FPU settings.
    /** Must only be accessed inside TBB binaries, and never directly in user
        code or inline methods. */
    std::uint64_t my_cpu_ctl_env;

    //! Specifies whether cancellation was requested for this task group.
    std::atomic<std::uint32_t> my_cancellation_requested;

    //! Versioning for run-time checks and behavioral traits of the context.
    enum class task_group_context_version : std::uint8_t {
        gold_2021U1 = 0,  // version of task_group_context released in oneTBB 2021.1 GOLD
        proxy_support = 1 // backward compatible support for 'this' context to act as a proxy
    };
    task_group_context_version my_version;

    //! The context traits.
    struct context_traits {
        bool fp_settings : 1;
        bool concurrent_wait : 1;
        bool bound : 1;
        bool proxy : 1; // true if 'this' acts as a proxy for a user-specified context
        bool reserved1 : 1;
        bool reserved2 : 1;
        bool reserved3 : 1;
        bool reserved4 : 1;
    } my_traits;

    static_assert(sizeof(context_traits) == 1, "Traits shall fit into one byte.");

    static constexpr std::uint8_t may_have_children = 1;
    //! The context internal state (currently only may_have_children).
    std::atomic<std::uint8_t> my_state;

    enum class lifetime_state : std::uint8_t {
        created,
        locked,
        isolated,
        bound,
        dead
    };

    //! The synchronization machine state to manage lifetime.
    std::atomic<lifetime_state> my_lifetime_state;

    union {
        //! Pointer to the context of the parent cancellation group. NULL for isolated contexts.
        task_group_context* my_parent;

        //! Pointer to the actual context that 'this' context represents a proxy of.
        task_group_context* my_actual_context;
    };

    //! Thread data instance that registered this context in its list.
    r1::context_list* my_context_list;
    static_assert(sizeof(std::atomic<r1::thread_data*>) == sizeof(r1::context_list*), "To preserve backward compatibility these types should have the same size");

    //! Used to form the thread-specific list of contexts without additional memory allocation.
    /** A context is added to the current thread's list when it is bound to its
        parent. Any context can be present in the list of only one thread. **/
    intrusive_list_node my_node;
    static_assert(sizeof(intrusive_list_node) == sizeof(context_list_node), "To preserve backward compatibility these types should have the same size");

    //! Pointer to the container storing the exception being propagated across this task group.
    std::atomic<r1::tbb_exception_ptr*> my_exception;
    static_assert(sizeof(std::atomic<r1::tbb_exception_ptr*>) == sizeof(r1::tbb_exception_ptr*),
            "backward compatibility check");

    //! Used to set and maintain stack stitching point for Intel Performance Tools.
    void* my_itt_caller;

    //! Description of algorithm for scheduler based instrumentation.
    string_resource_index my_name;

    char padding[max_nfs_size
        - sizeof(std::uint64_t)                       // my_cpu_ctl_env
        - sizeof(std::atomic<std::uint32_t>)          // my_cancellation_requested
        - sizeof(std::uint8_t)                        // my_version
        - sizeof(context_traits)                      // my_traits
        - sizeof(std::atomic<std::uint8_t>)           // my_state
        - sizeof(std::atomic<lifetime_state>)         // my_lifetime_state
        - sizeof(task_group_context*)                 // my_parent
        - sizeof(r1::context_list*)                   // my_context_list
        - sizeof(intrusive_list_node)                 // my_node
        - sizeof(std::atomic<r1::tbb_exception_ptr*>) // my_exception
        - sizeof(void*)                               // my_itt_caller
        - sizeof(string_resource_index)               // my_name
    ];

    task_group_context(context_traits t, string_resource_index name)
        : my_version{task_group_context_version::proxy_support}, my_name{name}
    {
        my_traits = t; // GCC 4.8 issues a missing-field-initializers warning on list-initialization of the bit-field struct
        r1::initialize(*this);
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    task_group_context(task_group_context* actual_context)
        : my_version{task_group_context_version::proxy_support}
        , my_actual_context{actual_context}
    {
        __TBB_ASSERT(my_actual_context, "Passed pointer value points to nothing.");
        my_traits.proxy = true;
        my_name = actual_context->my_name;

        // There is no need to initialize 'this' context, since it acts as a proxy for
        // my_actual_context, whose initialization is a user-side responsibility.
    }
#endif

    static context_traits make_traits(kind_type relation_with_parent, std::uintptr_t user_traits) {
        context_traits ct;
        ct.fp_settings = (user_traits & fp_settings) == fp_settings;
        ct.concurrent_wait = (user_traits & concurrent_wait) == concurrent_wait;
        ct.bound = relation_with_parent == bound;
        ct.proxy = false;
        ct.reserved1 = ct.reserved2 = ct.reserved3 = ct.reserved4 = false;
        return ct;
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    bool is_proxy() const {
        return my_version >= task_group_context_version::proxy_support && my_traits.proxy;
    }
#endif

    task_group_context& actual_context() noexcept {
#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
#endif
        return *this;
    }

    const task_group_context& actual_context() const noexcept {
#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
        if (is_proxy()) {
            __TBB_ASSERT(my_actual_context, "Actual task_group_context is not set.");
            return *my_actual_context;
        }
#endif
        return *this;
    }

public:
    //! Default & binding constructor.
    /** By default a bound context is created. That is, this context will be bound
        (as a child) to the context of the currently executing task. Cancellation
        requests passed to the parent context are propagated to all the contexts
        bound to it. Similarly, a priority change is propagated from the parent context
        to its children.

        If task_group_context::isolated is used as the argument, then the tasks associated
        with this context will never be affected by events in any other context.

        Creating an isolated context involves much less overhead, but such contexts have
        limited utility. Normally, when an exception occurs in an algorithm that has nested
        ones running, it is desirable to have all the nested algorithms cancelled
        as well. Such behavior requires nested algorithms to use bound contexts.

        There is one place where using isolated contexts is beneficial: an external
        thread. That is, if a particular algorithm is invoked directly from an external
        thread (not from a TBB task), supplying it with an explicitly created isolated
        context will result in a faster algorithm startup. (A construction sketch
        follows the constructor below.)

        VERSIONING NOTE:
        Implementation(s) of task_group_context constructor(s) cannot be made
        entirely out-of-line because the run-time version must be set by the user
        code. This will become critically important for binary compatibility, if
        we ever have to change the size of the context object. **/

    task_group_context(kind_type relation_with_parent = bound,
                       std::uintptr_t t = default_traits)
        : task_group_context(make_traits(relation_with_parent, t), CUSTOM_CTX) {}
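
    // A minimal construction sketch (illustrative only, not part of the class).
    // A bound context (the default) joins the cancellation tree of the currently
    // executing task; an isolated one never reacts to events in other contexts:
    //
    //     tbb::task_group_context bound_ctx;   // same as {bound, default_traits}
    //     tbb::task_group_context isolated_ctx(tbb::task_group_context::isolated);
    //
    // Traits may be OR-ed into the second argument, e.g. fp_settings or
    // concurrent_wait.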

    // Custom constructor for instrumentation of oneTBB algorithms
    task_group_context(string_resource_index name)
        : task_group_context(make_traits(bound, default_traits), name) {}

    // Do not introduce any logic on the user side, since it might break state propagation assumptions
    ~task_group_context() {
#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
        // When 'this' serves as a proxy, the initialization does not happen, so neither
        // should the destruction.
        if (!is_proxy())
#endif
        {
            r1::destroy(*this);
        }
    }

    //! Forcefully reinitializes the context after the task tree it was associated with is completed.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the context's parent if it is set. **/
    void reset() {
        r1::reset(actual_context());
    }

    //! Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
    /** \return false if cancellation has already been requested, true otherwise.

        Note that canceling never fails. When false is returned, it just means that
        another thread (or this one) has already sent a cancellation request to this
        context or to one of its ancestors (if this context is bound). It is guaranteed
        that when this method is concurrently called on the same not yet cancelled
        context, true will be returned by one and only one invocation. **/
    bool cancel_group_execution() {
        return r1::cancel_group_execution(actual_context());
    }

    //! Returns true if the context has received a cancellation request.
    bool is_group_execution_cancelled() {
        return r1::is_group_execution_cancelled(actual_context());
    }

#if __TBB_FP_CONTEXT
    //! Captures the current FPU control settings to the context.
    /** Because the method assumes that all the tasks that used to be associated with
        this context have already finished, calling it while the context is still
        in use somewhere in the task hierarchy leads to undefined behavior.

        IMPORTANT: This method is not thread safe!

        The method does not change the FPU control settings of the context's parent. **/
    void capture_fp_settings() {
        r1::capture_fp_settings(actual_context());
    }
#endif
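
    // Illustrative sketch (not part of the class): propagate caller-side FPU
    // settings, e.g. a rounding mode set via std::fesetround from <cfenv>, to the
    // tasks of a group. Assumes __TBB_FP_CONTEXT is enabled on this platform:
    //
    //     tbb::task_group_context ctx;
    //     std::fesetround(FE_TOWARDZERO);   // caller-side floating-point setting
    //     ctx.capture_fp_settings();        // tasks associated with ctx now use it
    //
    // Alternatively, pass task_group_context::fp_settings as a trait at
    // construction to capture the settings at that point.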

    //! Returns the user-visible context traits
    std::uintptr_t traits() const {
        std::uintptr_t t{};
        const task_group_context& ctx = actual_context();
        t |= ctx.my_traits.fp_settings ? fp_settings : 0;
        t |= ctx.my_traits.concurrent_wait ? concurrent_wait : 0;
        return t;
    }
private:
    // TODO: cleanup friends
    friend class r1::market;
    friend class r1::thread_data;
    friend class r1::task_dispatcher;
    template <bool>
    friend class r1::context_guard_helper;
    friend struct r1::task_arena_impl;
    friend struct r1::task_group_context_impl;
    friend class task_group_base;
}; // class task_group_context
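
// A minimal usage sketch for task_group_context (illustrative only; assumes a
// parallel algorithm header such as oneapi/tbb/parallel_for.h is also included
// and `body` is a hypothetical user functor). Work associated with the context
// can be cancelled as one group:
//
//     tbb::task_group_context root;
//     tbb::parallel_for(0, n, body, root);         // associate work with root
//     // ... meanwhile, from another thread:
//     bool first = root.cancel_group_execution();  // true for exactly one caller
//     // root.is_group_execution_cancelled() now returns true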

static_assert(sizeof(task_group_context) == 128, "Wrong size of task_group_context");

enum task_group_status {
    not_complete,
    complete,
    canceled
};

class task_group;
class structured_task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class isolated_task_group;
#endif

template<typename F>
class function_task : public task {
    const F m_func;
    wait_context& m_wait_ctx;
    small_object_allocator m_allocator;

    void finalize(const execution_data& ed) {
        // Make a local reference so we do not access 'this' after destruction.
        wait_context& wo = m_wait_ctx;
        // Copy the allocator to the stack
        auto allocator = m_allocator;
        // Destroy the user functor before releasing the wait.
        this->~function_task();
        wo.release();

        allocator.deallocate(this, ed);
    }
    task* execute(execution_data& ed) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize(ed);
        return res;
    }
    task* cancel(execution_data& ed) override {
        finalize(ed);
        return nullptr;
    }
public:
    function_task(const F& f, wait_context& wo, small_object_allocator& alloc)
        : m_func(f)
        , m_wait_ctx(wo)
        , m_allocator(alloc) {}

    function_task(F&& f, wait_context& wo, small_object_allocator& alloc)
        : m_func(std::move(f))
        , m_wait_ctx(wo)
        , m_allocator(alloc) {}
};

template <typename F>
class function_stack_task : public task {
    const F& m_func;
    wait_context& m_wait_ctx;

    void finalize() {
        m_wait_ctx.release();
    }
    task* execute(execution_data&) override {
        task* res = d2::task_ptr_or_nullptr(m_func);
        finalize();
        return res;
    }
    task* cancel(execution_data&) override {
        finalize();
        return nullptr;
    }
public:
    function_stack_task(const F& f, wait_context& wo) : m_func(f), m_wait_ctx(wo) {}
};

class task_group_base : no_copy {
protected:
    wait_context m_wait_ctx;
    task_group_context m_context;

    template<typename F>
    task_group_status internal_run_and_wait(const F& f) {
        function_stack_task<F> t{ f, m_wait_ctx };
        m_wait_ctx.reserve();
        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(t, context(), m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    task_group_status internal_run_and_wait(d2::task_handle&& h) {
        if (h == nullptr) {
            throw_exception(exception_id::bad_task_handle);
        }

        using acs = d2::task_handle_accessor;
        if (&acs::ctx_of(h) != &context()) {
            throw_exception(exception_id::bad_task_handle_wrong_task_group);
        }

        bool cancellation_status = false;
        try_call([&] {
            execute_and_wait(*acs::release(h), context(), m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = context().is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }
#endif

    template<typename F>
    task* prepare_task(F&& f) {
        m_wait_ctx.reserve();
        small_object_allocator alloc{};
        return alloc.new_object<function_task<typename std::decay<F>::type>>(std::forward<F>(f), m_wait_ctx, alloc);
    }

    task_group_context& context() noexcept {
        return m_context.actual_context();
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    template<typename F>
    d2::task_handle prepare_task_handle(F&& f) {
        m_wait_ctx.reserve();
        small_object_allocator alloc{};
        using function_task_t = d2::function_task<typename std::decay<F>::type>;
        d2::task_handle_task* function_task_p = alloc.new_object<function_task_t>(std::forward<F>(f), m_wait_ctx, context(), alloc);

        return d2::task_handle_accessor::construct(function_task_p);
    }
#endif

public:
    task_group_base(uintptr_t traits = 0)
        : m_wait_ctx(0)
        , m_context(task_group_context::bound, task_group_context::default_traits | traits)
    {}

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    task_group_base(task_group_context& ctx)
        : m_wait_ctx(0)
        , m_context(&ctx)
    {}
#endif

    ~task_group_base() noexcept(false) {
        if (m_wait_ctx.continue_execution()) {
#if __TBB_CPP17_UNCAUGHT_EXCEPTIONS_PRESENT
            bool stack_unwinding_in_progress = std::uncaught_exceptions() > 0;
#else
            bool stack_unwinding_in_progress = std::uncaught_exception();
#endif
            // Always attempt to do proper cleanup to avoid inevitable memory corruption
            // in case of missing wait (for the sake of better testability & debuggability)
            if (!context().is_group_execution_cancelled())
                cancel();
            d1::wait(m_wait_ctx, context());
            if (!stack_unwinding_in_progress)
                throw_exception(exception_id::missing_wait);
        }
    }

    task_group_status wait() {
        bool cancellation_status = false;
        try_call([&] {
            d1::wait(m_wait_ctx, context());
        }).on_completion([&] {
            // TODO: the reset method is not thread-safe. Ensure the correct behavior.
            cancellation_status = m_context.is_group_execution_cancelled();
            context().reset();
        });
        return cancellation_status ? canceled : complete;
    }

    void cancel() {
        context().cancel_group_execution();
    }
}; // class task_group_base

class task_group : public task_group_base {
public:
    task_group() : task_group_base(task_group_context::concurrent_wait) {}

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    task_group(task_group_context& ctx) : task_group_base(ctx) {}
#endif

    template<typename F>
    void run(F&& f) {
        spawn(*prepare_task(std::forward<F>(f)), context());
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    void run(d2::task_handle&& h) {
        if (h == nullptr) {
            throw_exception(exception_id::bad_task_handle);
        }

        using acs = d2::task_handle_accessor;
        if (&acs::ctx_of(h) != &context()) {
            throw_exception(exception_id::bad_task_handle_wrong_task_group);
        }

        spawn(*acs::release(h), context());
    }

    template<typename F>
    d2::task_handle defer(F&& f) {
        return prepare_task_handle(std::forward<F>(f));
    }
#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS

    template<typename F>
    task_group_status run_and_wait(const F& f) {
        return internal_run_and_wait(f);
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    task_group_status run_and_wait(d2::task_handle&& h) {
        return internal_run_and_wait(std::move(h));
    }
#endif
}; // class task_group
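
// A minimal usage sketch for task_group (illustrative only; work_a and work_b
// are hypothetical user functions). wait() returns tbb::complete, or
// tbb::canceled if the group's execution was cancelled:
//
//     tbb::task_group tg;
//     tg.run([] { work_a(); });                  // spawned asynchronously
//     tg.run([] { work_b(); });
//     if (tg.wait() == tbb::canceled) { /* react to cancellation */ }
//
// With __TBB_PREVIEW_TASK_GROUP_EXTENSIONS, a task can also be created first
// and submitted separately:
//
//     tbb::task_handle h = tg.defer([] { work_a(); });
//     tg.run(std::move(h));
//     tg.wait();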

#if TBB_PREVIEW_ISOLATED_TASK_GROUP
class spawn_delegate : public delegate_base {
    task* task_to_spawn;
    task_group_context& context;
    bool operator()() const override {
        spawn(*task_to_spawn, context);
        return true;
    }
public:
    spawn_delegate(task* a_task, task_group_context& ctx)
        : task_to_spawn(a_task), context(ctx)
    {}
};

class wait_delegate : public delegate_base {
    bool operator()() const override {
        status = tg.wait();
        return true;
    }
protected:
    task_group& tg;
    task_group_status& status;
public:
    wait_delegate(task_group& a_group, task_group_status& tgs)
        : tg(a_group), status(tgs) {}
};

template<typename F>
class run_wait_delegate : public wait_delegate {
    F& func;
    bool operator()() const override {
        status = tg.run_and_wait(func);
        return true;
    }
public:
    run_wait_delegate(task_group& a_group, F& a_func, task_group_status& tgs)
        : wait_delegate(a_group, tgs), func(a_func) {}
};

class isolated_task_group : public task_group {
    intptr_t this_isolation() {
        return reinterpret_cast<intptr_t>(this);
    }
public:
    isolated_task_group() : task_group() {}

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    isolated_task_group(task_group_context& ctx) : task_group(ctx) {}
#endif

    template<typename F>
    void run(F&& f) {
        spawn_delegate sd(prepare_task(std::forward<F>(f)), context());
        r1::isolate_within_arena(sd, this_isolation());
    }

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
    void run(d2::task_handle&& h) {
        if (h == nullptr) {
            throw_exception(exception_id::bad_task_handle);
        }

        using acs = d2::task_handle_accessor;
        if (&acs::ctx_of(h) != &context()) {
            throw_exception(exception_id::bad_task_handle_wrong_task_group);
        }

        spawn_delegate sd(acs::release(h), context());
        r1::isolate_within_arena(sd, this_isolation());
    }
#endif // __TBB_PREVIEW_TASK_GROUP_EXTENSIONS

    template<typename F>
    task_group_status run_and_wait( const F& f ) {
        task_group_status result = not_complete;
        run_wait_delegate<const F> rwd(*this, f, result);
        r1::isolate_within_arena(rwd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }

    task_group_status wait() {
        task_group_status result = not_complete;
        wait_delegate wd(*this, result);
        r1::isolate_within_arena(wd, this_isolation());
        __TBB_ASSERT(result != not_complete, "premature exit from wait?");
        return result;
    }
}; // class isolated_task_group
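
// Illustrative note (not part of the class): isolated_task_group routes every
// spawn and wait through r1::isolate_within_arena, keyed by the object's own
// address, so a thread blocked in wait() only picks up work belonging to this
// group rather than unrelated tasks from the surrounding arena. A minimal
// sketch, assuming TBB_PREVIEW_ISOLATED_TASK_GROUP is defined:
//
//     tbb::isolated_task_group itg;
//     itg.run([] { /* work that must not interleave with outer tasks */ });
//     itg.wait();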
#endif // TBB_PREVIEW_ISOLATED_TASK_GROUP

inline bool is_current_task_group_canceling() {
    task_group_context* ctx = current_context();
    return ctx ? ctx->is_group_execution_cancelled() : false;
}
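
// Illustrative sketch (not part of the header): a long-running body can poll
// this helper to exit early once its group is cancelled. Here `tg` is a
// tbb::task_group, and step() and n are hypothetical:
//
//     tg.run([] {
//         for (int i = 0; i < n; ++i) {
//             if (tbb::is_current_task_group_canceling()) return; // cooperative exit
//             step(i);
//         }
//     });
//     tg.cancel();   // running bodies observe the request at their next poll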

} // namespace d1
} // namespace detail

inline namespace v1 {
using detail::d1::task_group_context;
using detail::d1::task_group;
#if TBB_PREVIEW_ISOLATED_TASK_GROUP
using detail::d1::isolated_task_group;
#endif

using detail::d1::task_group_status;
using detail::d1::not_complete;
using detail::d1::complete;
using detail::d1::canceled;

using detail::d1::is_current_task_group_canceling;
using detail::r1::missing_wait;

#if __TBB_PREVIEW_TASK_GROUP_EXTENSIONS
using detail::d2::task_handle;
#endif
}

} // namespace tbb

#if _MSC_VER && !defined(__INTEL_COMPILER)
    #pragma warning(pop) // 4324 warning
#endif

#endif // __TBB_task_group_H
