1 | // The template and inlines for the -*- C++ -*- internal _Array helper class. |
2 | |
3 | // Copyright (C) 1997-2021 Free Software Foundation, Inc. |
4 | // |
5 | // This file is part of the GNU ISO C++ Library. This library is free |
6 | // software; you can redistribute it and/or modify it under the |
7 | // terms of the GNU General Public License as published by the |
8 | // Free Software Foundation; either version 3, or (at your option) |
9 | // any later version. |
10 | |
11 | // This library is distributed in the hope that it will be useful, |
12 | // but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | // GNU General Public License for more details. |
15 | |
16 | // Under Section 7 of GPL version 3, you are granted additional |
17 | // permissions described in the GCC Runtime Library Exception, version |
18 | // 3.1, as published by the Free Software Foundation. |
19 | |
20 | // You should have received a copy of the GNU General Public License and |
21 | // a copy of the GCC Runtime Library Exception along with this program; |
22 | // see the files COPYING3 and COPYING.RUNTIME respectively. If not, see |
23 | // <http://www.gnu.org/licenses/>. |
24 | |
25 | /** @file bits/valarray_array.h |
26 | * This is an internal header file, included by other library headers. |
27 | * Do not attempt to use it directly. @headername{valarray} |
28 | */ |
29 | |
30 | // Written by Gabriel Dos Reis <Gabriel.Dos-Reis@DPTMaths.ENS-Cachan.Fr> |
31 | |
32 | #ifndef _VALARRAY_ARRAY_H |
33 | #define _VALARRAY_ARRAY_H 1 |
34 | |
35 | #pragma GCC system_header |
36 | |
37 | #include <bits/c++config.h> |
38 | #include <bits/cpp_type_traits.h> |
39 | #include <cstdlib> |
40 | #include <new> |
41 | |
42 | namespace std _GLIBCXX_VISIBILITY(default) |
43 | { |
44 | _GLIBCXX_BEGIN_NAMESPACE_VERSION |
45 | |
46 | // |
47 | // Helper functions on raw pointers |
48 | // |
49 | |
  // We get memory the old-fashioned way
51 | template<typename _Tp> |
52 | _Tp* |
53 | __valarray_get_storage(size_t) __attribute__((__malloc__)); |
54 | |
55 | template<typename _Tp> |
56 | inline _Tp* |
57 | __valarray_get_storage(size_t __n) |
58 | { return static_cast<_Tp*>(operator new(__n * sizeof(_Tp))); } |
59 | |
60 | // Return memory to the system |
61 | inline void |
62 | __valarray_release_memory(void* __p) |
63 | { operator delete(__p); } |
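
  // Illustrative sketch (exposition only, not part of this header): the
  // two helpers above are used as a matched pair, with element lifetimes
  // managed separately:
  //
  //   double* __p = std::__valarray_get_storage<double>(__n);
  //   // ... construct, use, and destroy __n doubles at __p ...
  //   std::__valarray_release_memory(__p);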
64 | |
  // Turn raw memory into an array of _Tp filled with _Tp().
  // This is required by 'valarray<T> v(n);'.
67 | template<typename _Tp, bool> |
68 | struct _Array_default_ctor |
69 | { |
70 | // Please note that this isn't exception safe. But |
71 | // valarrays aren't required to be exception safe. |
72 | inline static void |
73 | _S_do_it(_Tp* __b, _Tp* __e) |
74 | { |
75 | while (__b != __e) |
76 | new(__b++) _Tp(); |
77 | } |
78 | }; |
79 | |
80 | template<typename _Tp> |
81 | struct _Array_default_ctor<_Tp, true> |
82 | { |
83 | // For fundamental types, it suffices to say 'memset()' |
84 | inline static void |
85 | _S_do_it(_Tp* __b, _Tp* __e) |
86 | { __builtin_memset(__b, 0, (__e - __b) * sizeof(_Tp)); } |
87 | }; |
88 | |
89 | template<typename _Tp> |
90 | inline void |
91 | __valarray_default_construct(_Tp* __b, _Tp* __e) |
92 | { |
93 | _Array_default_ctor<_Tp, __is_scalar<_Tp>::__value>::_S_do_it(__b, __e); |
94 | } |
95 | |
  // Turn raw memory into an array of _Tp filled with __t.
  // This is required by 'valarray<T> v(n, t)'.  Also
  // used in valarray<>::resize().
99 | template<typename _Tp, bool> |
100 | struct _Array_init_ctor |
101 | { |
102 | // Please note that this isn't exception safe. But |
103 | // valarrays aren't required to be exception safe. |
104 | inline static void |
105 | _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t) |
106 | { |
107 | while (__b != __e) |
108 | new(__b++) _Tp(__t); |
109 | } |
110 | }; |
111 | |
112 | template<typename _Tp> |
113 | struct _Array_init_ctor<_Tp, true> |
114 | { |
115 | inline static void |
116 | _S_do_it(_Tp* __b, _Tp* __e, const _Tp __t) |
117 | { |
118 | while (__b != __e) |
119 | *__b++ = __t; |
120 | } |
121 | }; |
122 | |
123 | template<typename _Tp> |
124 | inline void |
125 | __valarray_fill_construct(_Tp* __b, _Tp* __e, const _Tp __t) |
126 | { |
127 | _Array_init_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __t); |
128 | } |
129 | |
130 | // |
131 | // copy-construct raw array [__o, *) from plain array [__b, __e) |
  // We can't just use 'memcpy()' for non-trivial types
133 | // |
134 | template<typename _Tp, bool> |
135 | struct _Array_copy_ctor |
136 | { |
137 | // Please note that this isn't exception safe. But |
138 | // valarrays aren't required to be exception safe. |
139 | inline static void |
140 | _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o) |
141 | { |
142 | while (__b != __e) |
143 | new(__o++) _Tp(*__b++); |
144 | } |
145 | }; |
146 | |
147 | template<typename _Tp> |
148 | struct _Array_copy_ctor<_Tp, true> |
149 | { |
150 | inline static void |
151 | _S_do_it(const _Tp* __b, const _Tp* __e, _Tp* __restrict__ __o) |
152 | { |
153 | if (__b) |
154 | __builtin_memcpy(__o, __b, (__e - __b) * sizeof(_Tp)); |
155 | } |
156 | }; |
157 | |
158 | template<typename _Tp> |
159 | inline void |
160 | __valarray_copy_construct(const _Tp* __b, const _Tp* __e, |
161 | _Tp* __restrict__ __o) |
162 | { |
163 | _Array_copy_ctor<_Tp, __is_trivial(_Tp)>::_S_do_it(__b, __e, __o); |
164 | } |
165 | |
166 | // copy-construct raw array [__o, *) from strided array __a[<__n : __s>] |
167 | template<typename _Tp> |
168 | inline void |
169 | __valarray_copy_construct (const _Tp* __restrict__ __a, size_t __n, |
170 | size_t __s, _Tp* __restrict__ __o) |
171 | { |
172 | if (__is_trivial(_Tp)) |
173 | while (__n--) |
174 | { |
175 | *__o++ = *__a; |
176 | __a += __s; |
177 | } |
178 | else |
179 | while (__n--) |
180 | { |
181 | new(__o++) _Tp(*__a); |
182 | __a += __s; |
183 | } |
184 | } |
185 | |
186 | // copy-construct raw array [__o, *) from indexed array __a[__i[<__n>]] |
187 | template<typename _Tp> |
188 | inline void |
189 | __valarray_copy_construct (const _Tp* __restrict__ __a, |
190 | const size_t* __restrict__ __i, |
191 | _Tp* __restrict__ __o, size_t __n) |
192 | { |
193 | if (__is_trivial(_Tp)) |
194 | while (__n--) |
195 | *__o++ = __a[*__i++]; |
196 | else |
197 | while (__n--) |
198 | new (__o++) _Tp(__a[*__i++]); |
199 | } |
200 | |
201 | // Do the necessary cleanup when we're done with arrays. |
202 | template<typename _Tp> |
203 | inline void |
204 | __valarray_destroy_elements(_Tp* __b, _Tp* __e) |
205 | { |
206 | if (!__is_trivial(_Tp)) |
207 | while (__b != __e) |
208 | { |
209 | __b->~_Tp(); |
210 | ++__b; |
211 | } |
212 | } |
213 | |
214 | // Fill a plain array __a[<__n>] with __t |
215 | template<typename _Tp> |
216 | inline void |
217 | __valarray_fill(_Tp* __restrict__ __a, size_t __n, const _Tp& __t) |
218 | { |
219 | while (__n--) |
220 | *__a++ = __t; |
221 | } |
222 | |
  // Fill strided array __a[<__n : __s>] with __t
224 | template<typename _Tp> |
225 | inline void |
226 | __valarray_fill(_Tp* __restrict__ __a, size_t __n, |
227 | size_t __s, const _Tp& __t) |
228 | { |
229 | for (size_t __i = 0; __i < __n; ++__i, __a += __s) |
230 | *__a = __t; |
231 | } |
232 | |
  // Fill indexed array __a[__i[<__n>]] with __t
234 | template<typename _Tp> |
235 | inline void |
236 | __valarray_fill(_Tp* __restrict__ __a, const size_t* __restrict__ __i, |
237 | size_t __n, const _Tp& __t) |
238 | { |
239 | for (size_t __j = 0; __j < __n; ++__j, ++__i) |
240 | __a[*__i] = __t; |
241 | } |
242 | |
  // Copy plain array __a[<__n>] into plain array __b[<__n>]
  // For non-trivial types, it is wrong to use 'memcpy()'
245 | template<typename _Tp, bool> |
246 | struct _Array_copier |
247 | { |
248 | inline static void |
249 | _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b) |
250 | { |
251 | while(__n--) |
252 | *__b++ = *__a++; |
253 | } |
254 | }; |
255 | |
256 | template<typename _Tp> |
257 | struct _Array_copier<_Tp, true> |
258 | { |
259 | inline static void |
260 | _S_do_it(const _Tp* __restrict__ __a, size_t __n, _Tp* __restrict__ __b) |
261 | { |
262 | if (__n != 0) |
263 | __builtin_memcpy(__b, __a, __n * sizeof (_Tp)); |
264 | } |
265 | }; |
266 | |
  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
268 | template<typename _Tp> |
269 | inline void |
270 | __valarray_copy(const _Tp* __restrict__ __a, size_t __n, |
271 | _Tp* __restrict__ __b) |
272 | { |
273 | _Array_copier<_Tp, __is_trivial(_Tp)>::_S_do_it(__a, __n, __b); |
274 | } |
275 | |
  // Copy strided array __a[<__n : __s>] into plain array __b[<__n>]
277 | template<typename _Tp> |
278 | inline void |
279 | __valarray_copy(const _Tp* __restrict__ __a, size_t __n, size_t __s, |
280 | _Tp* __restrict__ __b) |
281 | { |
282 | for (size_t __i = 0; __i < __n; ++__i, ++__b, __a += __s) |
283 | *__b = *__a; |
284 | } |
285 | |
286 | // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>] |
287 | template<typename _Tp> |
288 | inline void |
289 | __valarray_copy(const _Tp* __restrict__ __a, _Tp* __restrict__ __b, |
290 | size_t __n, size_t __s) |
291 | { |
292 | for (size_t __i = 0; __i < __n; ++__i, ++__a, __b += __s) |
293 | *__b = *__a; |
294 | } |
295 | |
296 | // Copy strided array __src[<__n : __s1>] into another |
297 | // strided array __dst[< : __s2>]. Their sizes must match. |
298 | template<typename _Tp> |
299 | inline void |
300 | __valarray_copy(const _Tp* __restrict__ __src, size_t __n, size_t __s1, |
301 | _Tp* __restrict__ __dst, size_t __s2) |
302 | { |
303 | for (size_t __i = 0; __i < __n; ++__i) |
304 | __dst[__i * __s2] = __src[__i * __s1]; |
305 | } |
306 | |
  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>]
308 | template<typename _Tp> |
309 | inline void |
310 | __valarray_copy(const _Tp* __restrict__ __a, |
311 | const size_t* __restrict__ __i, |
312 | _Tp* __restrict__ __b, size_t __n) |
313 | { |
314 | for (size_t __j = 0; __j < __n; ++__j, ++__b, ++__i) |
315 | *__b = __a[*__i]; |
316 | } |
317 | |
  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
319 | template<typename _Tp> |
320 | inline void |
321 | __valarray_copy(const _Tp* __restrict__ __a, size_t __n, |
322 | _Tp* __restrict__ __b, const size_t* __restrict__ __i) |
323 | { |
324 | for (size_t __j = 0; __j < __n; ++__j, ++__a, ++__i) |
325 | __b[*__i] = *__a; |
326 | } |
327 | |
  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
330 | template<typename _Tp> |
331 | inline void |
332 | __valarray_copy(const _Tp* __restrict__ __src, size_t __n, |
333 | const size_t* __restrict__ __i, |
334 | _Tp* __restrict__ __dst, const size_t* __restrict__ __j) |
335 | { |
336 | for (size_t __k = 0; __k < __n; ++__k) |
337 | __dst[*__j++] = __src[*__i++]; |
338 | } |
339 | |
340 | // |
  // Compute the sum of elements in range [__f, __l), which must not be
  // empty.  This is a naive algorithm; it suffers from catastrophic
  // cancellation.  In the future, try to specialize for _Tp = float,
  // double, long double using a more accurate algorithm.
345 | // |
346 | template<typename _Tp> |
347 | inline _Tp |
348 | __valarray_sum(const _Tp* __f, const _Tp* __l) |
349 | { |
350 | _Tp __r = *__f++; |
351 | while (__f != __l) |
352 | __r += *__f++; |
353 | return __r; |
354 | } |
355 | |
356 | // Compute the min/max of an array-expression |
357 | template<typename _Ta> |
358 | inline typename _Ta::value_type |
359 | __valarray_min(const _Ta& __a) |
360 | { |
361 | size_t __s = __a.size(); |
362 | typedef typename _Ta::value_type _Value_type; |
363 | _Value_type __r = __s == 0 ? _Value_type() : __a[0]; |
364 | for (size_t __i = 1; __i < __s; ++__i) |
365 | { |
366 | _Value_type __t = __a[__i]; |
367 | if (__t < __r) |
368 | __r = __t; |
369 | } |
370 | return __r; |
371 | } |
372 | |
373 | template<typename _Ta> |
374 | inline typename _Ta::value_type |
375 | __valarray_max(const _Ta& __a) |
376 | { |
377 | size_t __s = __a.size(); |
378 | typedef typename _Ta::value_type _Value_type; |
379 | _Value_type __r = __s == 0 ? _Value_type() : __a[0]; |
380 | for (size_t __i = 1; __i < __s; ++__i) |
381 | { |
382 | _Value_type __t = __a[__i]; |
383 | if (__t > __r) |
384 | __r = __t; |
385 | } |
386 | return __r; |
387 | } |
388 | |
389 | // |
390 | // Helper class _Array, first layer of valarray abstraction. |
391 | // All operations on valarray should be forwarded to this class |
392 | // whenever possible. -- gdr |
393 | // |
394 | |
395 | template<typename _Tp> |
396 | struct _Array |
397 | { |
398 | explicit _Array(_Tp* const __restrict__); |
399 | explicit _Array(const valarray<_Tp>&); |
400 | _Array(const _Tp* __restrict__, size_t); |
401 | |
402 | _Tp* begin() const; |
403 | |
404 | _Tp* const __restrict__ _M_data; |
405 | }; |
406 | |
407 | |
408 | // Copy-construct plain array __b[<__n>] from indexed array __a[__i[<__n>]] |
409 | template<typename _Tp> |
410 | inline void |
411 | __valarray_copy_construct(_Array<_Tp> __a, _Array<size_t> __i, |
412 | _Array<_Tp> __b, size_t __n) |
413 | { std::__valarray_copy_construct(__a._M_data, __i._M_data, |
414 | __b._M_data, __n); } |
415 | |
416 | // Copy-construct plain array __b[<__n>] from strided array __a[<__n : __s>] |
417 | template<typename _Tp> |
418 | inline void |
419 | __valarray_copy_construct(_Array<_Tp> __a, size_t __n, size_t __s, |
420 | _Array<_Tp> __b) |
421 | { std::__valarray_copy_construct(__a._M_data, __n, __s, __b._M_data); } |
422 | |
423 | template<typename _Tp> |
424 | inline void |
425 | __valarray_fill (_Array<_Tp> __a, size_t __n, const _Tp& __t) |
426 | { std::__valarray_fill(__a._M_data, __n, __t); } |
427 | |
428 | template<typename _Tp> |
429 | inline void |
430 | __valarray_fill(_Array<_Tp> __a, size_t __n, size_t __s, const _Tp& __t) |
431 | { std::__valarray_fill(__a._M_data, __n, __s, __t); } |
432 | |
433 | template<typename _Tp> |
434 | inline void |
435 | __valarray_fill(_Array<_Tp> __a, _Array<size_t> __i, |
436 | size_t __n, const _Tp& __t) |
437 | { std::__valarray_fill(__a._M_data, __i._M_data, __n, __t); } |
438 | |
  // Copy a plain array __a[<__n>] into a plain array __b[<__n>]
440 | template<typename _Tp> |
441 | inline void |
442 | __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) |
443 | { std::__valarray_copy(__a._M_data, __n, __b._M_data); } |
444 | |
  // Copy strided array __a[<__n : __s>] into plain array __b[<__n>]
446 | template<typename _Tp> |
447 | inline void |
448 | __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s, _Array<_Tp> __b) |
449 | { std::__valarray_copy(__a._M_data, __n, __s, __b._M_data); } |
450 | |
451 | // Copy a plain array __a[<__n>] into a strided array __b[<__n : __s>] |
452 | template<typename _Tp> |
453 | inline void |
454 | __valarray_copy(_Array<_Tp> __a, _Array<_Tp> __b, size_t __n, size_t __s) |
    { std::__valarray_copy(__a._M_data, __b._M_data, __n, __s); }
456 | |
457 | // Copy strided array __src[<__n : __s1>] into another |
458 | // strided array __dst[< : __s2>]. Their sizes must match. |
459 | template<typename _Tp> |
460 | inline void |
461 | __valarray_copy(_Array<_Tp> __a, size_t __n, size_t __s1, |
462 | _Array<_Tp> __b, size_t __s2) |
463 | { std::__valarray_copy(__a._M_data, __n, __s1, __b._M_data, __s2); } |
464 | |
  // Copy an indexed array __a[__i[<__n>]] into plain array __b[<__n>]
466 | template<typename _Tp> |
467 | inline void |
468 | __valarray_copy(_Array<_Tp> __a, _Array<size_t> __i, |
469 | _Array<_Tp> __b, size_t __n) |
470 | { std::__valarray_copy(__a._M_data, __i._M_data, __b._M_data, __n); } |
471 | |
  // Copy a plain array __a[<__n>] into an indexed array __b[__i[<__n>]]
473 | template<typename _Tp> |
474 | inline void |
475 | __valarray_copy(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b, |
476 | _Array<size_t> __i) |
477 | { std::__valarray_copy(__a._M_data, __n, __b._M_data, __i._M_data); } |
478 | |
  // Copy the first __n elements of an indexed array __src[<__i>] into
  // another indexed array __dst[<__j>].
481 | template<typename _Tp> |
482 | inline void |
483 | __valarray_copy(_Array<_Tp> __src, size_t __n, _Array<size_t> __i, |
484 | _Array<_Tp> __dst, _Array<size_t> __j) |
485 | { |
486 | std::__valarray_copy(__src._M_data, __n, __i._M_data, |
487 | __dst._M_data, __j._M_data); |
488 | } |
489 | |
490 | template<typename _Tp> |
491 | inline |
492 | _Array<_Tp>::_Array(_Tp* const __restrict__ __p) |
493 | : _M_data (__p) {} |
494 | |
495 | template<typename _Tp> |
496 | inline |
497 | _Array<_Tp>::_Array(const valarray<_Tp>& __v) |
498 | : _M_data (__v._M_data) {} |
499 | |
500 | template<typename _Tp> |
501 | inline |
502 | _Array<_Tp>::_Array(const _Tp* __restrict__ __b, size_t __s) |
503 | : _M_data(__valarray_get_storage<_Tp>(__s)) |
504 | { std::__valarray_copy_construct(__b, __s, _M_data); } |
505 | |
506 | template<typename _Tp> |
507 | inline _Tp* |
508 | _Array<_Tp>::begin () const |
509 | { return _M_data; } |
510 | |
511 | #define _DEFINE_ARRAY_FUNCTION(_Op, _Name) \ |
512 | template<typename _Tp> \ |
513 | inline void \ |
514 | _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, const _Tp& __t) \ |
515 | { \ |
516 | for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p) \ |
517 | *__p _Op##= __t; \ |
518 | } \ |
519 | \ |
520 | template<typename _Tp> \ |
521 | inline void \ |
522 | _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, _Array<_Tp> __b) \ |
523 | { \ |
524 | _Tp* __p = __a._M_data; \ |
525 | for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; ++__p, ++__q) \ |
526 | *__p _Op##= *__q; \ |
527 | } \ |
528 | \ |
529 | template<typename _Tp, class _Dom> \ |
530 | void \ |
531 | _Array_augmented_##_Name(_Array<_Tp> __a, \ |
532 | const _Expr<_Dom, _Tp>& __e, size_t __n) \ |
533 | { \ |
534 | _Tp* __p(__a._M_data); \ |
535 | for (size_t __i = 0; __i < __n; ++__i, ++__p) \ |
536 | *__p _Op##= __e[__i]; \ |
537 | } \ |
538 | \ |
539 | template<typename _Tp> \ |
540 | inline void \ |
541 | _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, size_t __s, \ |
542 | _Array<_Tp> __b) \ |
543 | { \ |
544 | _Tp* __q(__b._M_data); \ |
545 | for (_Tp* __p = __a._M_data; __p < __a._M_data + __s * __n; \ |
546 | __p += __s, ++__q) \ |
547 | *__p _Op##= *__q; \ |
548 | } \ |
549 | \ |
550 | template<typename _Tp> \ |
551 | inline void \ |
552 | _Array_augmented_##_Name(_Array<_Tp> __a, _Array<_Tp> __b, \ |
553 | size_t __n, size_t __s) \ |
554 | { \ |
555 | _Tp* __q(__b._M_data); \ |
556 | for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \ |
557 | ++__p, __q += __s) \ |
558 | *__p _Op##= *__q; \ |
559 | } \ |
560 | \ |
561 | template<typename _Tp, class _Dom> \ |
562 | void \ |
563 | _Array_augmented_##_Name(_Array<_Tp> __a, size_t __s, \ |
564 | const _Expr<_Dom, _Tp>& __e, size_t __n) \ |
565 | { \ |
566 | _Tp* __p(__a._M_data); \ |
567 | for (size_t __i = 0; __i < __n; ++__i, __p += __s) \ |
568 | *__p _Op##= __e[__i]; \ |
569 | } \ |
570 | \ |
571 | template<typename _Tp> \ |
572 | inline void \ |
573 | _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \ |
574 | _Array<_Tp> __b, size_t __n) \ |
575 | { \ |
576 | _Tp* __q(__b._M_data); \ |
577 | for (size_t* __j = __i._M_data; __j < __i._M_data + __n; \ |
578 | ++__j, ++__q) \ |
579 | __a._M_data[*__j] _Op##= *__q; \ |
580 | } \ |
581 | \ |
582 | template<typename _Tp> \ |
583 | inline void \ |
584 | _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \ |
585 | _Array<_Tp> __b, _Array<size_t> __i) \ |
586 | { \ |
587 | _Tp* __p(__a._M_data); \ |
588 | for (size_t* __j = __i._M_data; __j<__i._M_data + __n; \ |
589 | ++__j, ++__p) \ |
590 | *__p _Op##= __b._M_data[*__j]; \ |
591 | } \ |
592 | \ |
593 | template<typename _Tp, class _Dom> \ |
594 | void \ |
595 | _Array_augmented_##_Name(_Array<_Tp> __a, _Array<size_t> __i, \ |
596 | const _Expr<_Dom, _Tp>& __e, size_t __n) \ |
597 | { \ |
598 | size_t* __j(__i._M_data); \ |
599 | for (size_t __k = 0; __k<__n; ++__k, ++__j) \ |
600 | __a._M_data[*__j] _Op##= __e[__k]; \ |
601 | } \ |
602 | \ |
603 | template<typename _Tp> \ |
604 | void \ |
605 | _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \ |
606 | _Array<_Tp> __b, size_t __n) \ |
607 | { \ |
608 | bool* __ok(__m._M_data); \ |
609 | _Tp* __p(__a._M_data); \ |
610 | for (_Tp* __q = __b._M_data; __q < __b._M_data + __n; \ |
611 | ++__q, ++__ok, ++__p) \ |
612 | { \ |
613 | while (! *__ok) \ |
614 | { \ |
615 | ++__ok; \ |
616 | ++__p; \ |
617 | } \ |
618 | *__p _Op##= *__q; \ |
619 | } \ |
620 | } \ |
621 | \ |
622 | template<typename _Tp> \ |
623 | void \ |
624 | _Array_augmented_##_Name(_Array<_Tp> __a, size_t __n, \ |
625 | _Array<_Tp> __b, _Array<bool> __m) \ |
626 | { \ |
627 | bool* __ok(__m._M_data); \ |
628 | _Tp* __q(__b._M_data); \ |
629 | for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; \ |
630 | ++__p, ++__ok, ++__q) \ |
631 | { \ |
632 | while (! *__ok) \ |
633 | { \ |
634 | ++__ok; \ |
635 | ++__q; \ |
636 | } \ |
637 | *__p _Op##= *__q; \ |
638 | } \ |
639 | } \ |
640 | \ |
641 | template<typename _Tp, class _Dom> \ |
642 | void \ |
643 | _Array_augmented_##_Name(_Array<_Tp> __a, _Array<bool> __m, \ |
644 | const _Expr<_Dom, _Tp>& __e, size_t __n) \ |
645 | { \ |
646 | bool* __ok(__m._M_data); \ |
647 | _Tp* __p(__a._M_data); \ |
648 | for (size_t __i = 0; __i < __n; ++__i, ++__ok, ++__p) \ |
649 | { \ |
650 | while (! *__ok) \ |
651 | { \ |
652 | ++__ok; \ |
653 | ++__p; \ |
654 | } \ |
655 | *__p _Op##= __e[__i]; \ |
656 | } \ |
657 | } |
658 | |
659 | _DEFINE_ARRAY_FUNCTION(+, __plus) |
660 | _DEFINE_ARRAY_FUNCTION(-, __minus) |
661 | _DEFINE_ARRAY_FUNCTION(*, __multiplies) |
662 | _DEFINE_ARRAY_FUNCTION(/, __divides) |
663 | _DEFINE_ARRAY_FUNCTION(%, __modulus) |
664 | _DEFINE_ARRAY_FUNCTION(^, __bitwise_xor) |
665 | _DEFINE_ARRAY_FUNCTION(|, __bitwise_or) |
666 | _DEFINE_ARRAY_FUNCTION(&, __bitwise_and) |
667 | _DEFINE_ARRAY_FUNCTION(<<, __shift_left) |
668 | _DEFINE_ARRAY_FUNCTION(>>, __shift_right) |
669 | |
670 | #undef _DEFINE_ARRAY_FUNCTION |
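
  // For exposition only: _DEFINE_ARRAY_FUNCTION(+, __plus) above expands,
  // among other overloads, to
  //
  //   template<typename _Tp>
  //     inline void
  //     _Array_augmented___plus(_Array<_Tp> __a, size_t __n, const _Tp& __t)
  //     {
  //       for (_Tp* __p = __a._M_data; __p < __a._M_data + __n; ++__p)
  //         *__p += __t;
  //     }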
671 | |
672 | _GLIBCXX_END_NAMESPACE_VERSION |
673 | } // namespace |
674 | |
675 | # include <bits/valarray_array.tcc> |
676 | |
#endif /* _VALARRAY_ARRAY_H */
678 | |