 __parallel_transform_reduce(_ExecutionPolicy&&, _Index __first, _Index __last, _Up __u, _Tp __init, _Cp __combine,
                             _Rp __brick_reduce)
 {
-    __par_backend::__par_trans_red_body<_Index, _Up, _Tp, _Cp, _Rp> __body(__u, __init, __combine, __brick_reduce);
+    __tbb_backend::__par_trans_red_body<_Index, _Up, _Tp, _Cp, _Rp> __body(__u, __init, __combine, __brick_reduce);
     // The grain size of 3 is used in order to provide a minimum of 2 elements for each body
     tbb::this_task_arena::isolate(
         [__first, __last, &__body]() { tbb::parallel_reduce(tbb::blocked_range<_Index>(__first, __last, 3), __body); });
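A side note on the grain-size comment: tbb::blocked_range stops splitting once a subrange is no larger than its grain size, and each split roughly halves the range, so a grain size of 3 leaves every body invocation at least 2 elements. A minimal standalone sketch of that guarantee (not part of this patch; the range bounds and names are illustrative):

// Sketch: with grainsize 3, the smallest subrange any body receives holds
// 2 elements, because only ranges larger than 3 are split, and in half.
#include <tbb/blocked_range.h>
#include <tbb/parallel_reduce.h>
#include <algorithm>
#include <cstddef>
#include <cstdio>

int main()
{
    std::size_t smallest = tbb::parallel_reduce(
        tbb::blocked_range<int>(0, 1000, /*grainsize*/ 3), std::size_t(1000),
        [](const tbb::blocked_range<int>& r, std::size_t m) { return std::min(m, r.size()); },
        [](std::size_t a, std::size_t b) { return std::min(a, b); });
    std::printf("smallest chunk seen: %zu\n", smallest); // never less than 2
}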
@@ -305,9 +305,9 @@ __upsweep(_Index __i, _Index __m, _Index __tilesize, _Tp* __r, _Index __lastsize
 {
     _Index __k = __split(__m);
     tbb::parallel_invoke(
-        [=] { __par_backend::__upsweep(__i, __k, __tilesize, __r, __tilesize, __reduce, __combine); },
+        [=] { __tbb_backend::__upsweep(__i, __k, __tilesize, __r, __tilesize, __reduce, __combine); },
         [=] {
-            __par_backend::__upsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize, __reduce, __combine);
+            __tbb_backend::__upsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize, __reduce, __combine);
         });
     if (__m == 2 * __k)
         __r[__m - 1] = __combine(__r[__k - 1], __r[__m - 1]);
@@ -325,11 +325,11 @@ __downsweep(_Index __i, _Index __m, _Index __tilesize, _Tp* __r, _Index __lastsi
 {
     const _Index __k = __split(__m);
     tbb::parallel_invoke(
-        [=] { __par_backend::__downsweep(__i, __k, __tilesize, __r, __tilesize, __initial, __combine, __scan); },
+        [=] { __tbb_backend::__downsweep(__i, __k, __tilesize, __r, __tilesize, __initial, __combine, __scan); },
         // Assumes that __combine never throws.
         // TODO: Consider adding a requirement for user functors to be constant.
         [=, &__combine] {
-            __par_backend::__downsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize,
+            __tbb_backend::__downsweep(__i + __k, __m - __k, __tilesize, __r + __k, __lastsize,
                                        __combine(__initial, __r[__k - 1]), __combine, __scan);
         });
 }
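For readers unfamiliar with the two-pass scheme these hunks touch: __upsweep reduces each tile and, wherever a range splits evenly, folds the left half's total into the right half's last slot; __downsweep then re-walks the tree, scanning each tile with a carry extended by the left sibling's total __r[__k - 1]. Below is a serial model of both recursions (a sketch under the assumption that __split picks a power-of-two split point; split_pow2, the prefix-sum driver, and all names are illustrative, not the backend's code):

// Serial model of the upsweep/downsweep pair: computes inclusive prefix sums.
#include <cstddef>
#include <cstdio>
#include <vector>

// Stand-in for the backend's __split: the largest power of two below m
// (assumed), which keeps the folds aligned with the apex loop shown later.
static std::size_t split_pow2(std::size_t m)
{
    std::size_t k = 1;
    while (2 * k < m)
        k *= 2;
    return k;
}

template <typename T, typename Reduce, typename Combine>
void upsweep_serial(std::size_t i, std::size_t m, std::size_t tilesize, T* r,
                    std::size_t lastsize, Reduce reduce, Combine combine)
{
    if (m == 1)
        r[0] = reduce(i * tilesize, lastsize); // reduce one tile
    else
    {
        std::size_t k = split_pow2(m);
        upsweep_serial(i, k, tilesize, r, tilesize, reduce, combine);
        upsweep_serial(i + k, m - k, tilesize, r + k, lastsize, reduce, combine);
        if (m == 2 * k) // even split: fold the left half's total into the last slot
            r[m - 1] = combine(r[k - 1], r[m - 1]);
    }
}

template <typename T, typename Combine, typename Scan>
void downsweep_serial(std::size_t i, std::size_t m, std::size_t tilesize, T* r,
                      std::size_t lastsize, T initial, Combine combine, Scan scan)
{
    if (m == 1)
        scan(i * tilesize, lastsize, initial); // scan one tile with its carry-in
    else
    {
        std::size_t k = split_pow2(m);
        downsweep_serial(i, k, tilesize, r, tilesize, initial, combine, scan);
        // The right half's carry is extended by the left half's total, r[k - 1].
        downsweep_serial(i + k, m - k, tilesize, r + k, lastsize,
                         combine(initial, r[k - 1]), combine, scan);
    }
}

int main()
{
    const std::size_t n = 10, tilesize = 2;
    std::vector<int> x = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, y(n);
    const std::size_t m = (n - 1) / tilesize; // as in __parallel_strict_scan below
    std::vector<int> r(m + 1);
    auto reduce = [&](std::size_t j, std::size_t len) {
        int s = 0;
        for (std::size_t q = j; q < j + len; ++q)
            s += x[q];
        return s;
    };
    auto combine = [](int a, int b) { return a + b; };
    auto scan = [&](std::size_t j, std::size_t len, int init) {
        for (std::size_t q = j; q < j + len; ++q)
            y[q] = init += x[q];
    };
    upsweep_serial(std::size_t(0), m + 1, tilesize, r.data(), n - m * tilesize, reduce, combine);
    downsweep_serial(std::size_t(0), m + 1, tilesize, r.data(), n - m * tilesize, 0, combine, scan);
    std::printf("%d %d\n", y[0], y[n - 1]); // inclusive prefix sums: 1 and 55
}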
@@ -363,7 +363,7 @@ __parallel_strict_scan(_ExecutionPolicy&&, _Index __n, _Tp __initial, _Rp __redu
         _Index __m = (__n - 1) / __tilesize;
         __buffer<_Tp> __buf(__m + 1);
         _Tp* __r = __buf.get();
-        __par_backend::__upsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __reduce,
+        __tbb_backend::__upsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __reduce,
                                  __combine);

         // When __apex is a no-op and __combine has no side effects, a good optimizer
@@ -375,7 +375,7 @@ __parallel_strict_scan(_ExecutionPolicy&&, _Index __n, _Tp __initial, _Rp __redu
         while ((__k &= __k - 1))
             __t = __combine(__r[__k - 1], __t);
         __apex(__combine(__initial, __t));
-        __par_backend::__downsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __initial,
+        __tbb_backend::__downsweep(_Index(0), _Index(__m + 1), __tilesize, __r, __n - __m * __tilesize, __initial,
                                    __combine, __scan);
         return;
     }
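The while loop in this hunk computes the grand total handed to __apex by repeatedly clearing the lowest set bit of __k, walking the binary decomposition of the tile count across the block totals that __upsweep folded into __r. A worked sketch with + as __combine (hypothetical values; assumes the power-of-two splits of the serial model above):

// Sketch: for 6 tiles with totals {1,2,3,4,5,6}, upsweep leaves r[3] holding
// the total of tiles 0..3 and r[5] the total of tiles 4..5; clearing the
// lowest set bit of k walks the decomposition 6 = 4 + 2.
#include <cstddef>
#include <cstdio>

int main()
{
    // r as left by the upsweep model above for tile totals {1,2,3,4,5,6}.
    int r[6] = {1, 3, 3, 10, 5, 11};
    std::size_t k = 6;      // number of tiles, 0b110
    int t = r[k - 1];       // 11: total of tiles 4..5
    while ((k &= k - 1))    // 0b110 -> 0b100 -> 0
        t = r[k - 1] + t;   // + 10: total of tiles 0..3
    std::printf("%d\n", t); // 21: the reduction over all six tiles
}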
@@ -874,7 +874,7 @@ template <typename _RandomAccessIterator1, typename _RandomAccessIterator2, type
 tbb::task*
 __stable_sort_task<_RandomAccessIterator1, _RandomAccessIterator2, _Compare, _LeafSort>::execute()
 {
-    typedef __merge_task<_RandomAccessIterator1, _RandomAccessIterator2, _Compare, __serial_destroy,
+    typedef __merge_task<_RandomAccessIterator1, _RandomAccessIterator2, _Compare, __utils::__serial_destroy,
                          __utils::__serial_move_merge>
         _MergeTaskType;