/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkTArray_DEFINED
#define SkTArray_DEFINED

#include "include/private/base/SkAlignedStorage.h"
#include "include/private/base/SkAssert.h"
#include "include/private/base/SkAttributes.h"
#include "include/private/base/SkContainers.h"
#include "include/private/base/SkDebug.h"
#include "include/private/base/SkMalloc.h"
#include "include/private/base/SkMath.h"
#include "include/private/base/SkSpan_impl.h"
#include "include/private/base/SkTo.h"
#include "include/private/base/SkTypeTraits.h" // IWYU pragma: keep

#include <algorithm>
#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <initializer_list>
#include <new>
#include <utility>

namespace skia_private {
/** TArray<T> implements a typical, mostly std::vector-like array.
    Each T will be default-initialized on allocation, and ~T will be called on destruction.

    MEM_MOVE controls the behavior when a T needs to be moved (e.g. when the array is resized)
    - true: T will be bit-copied via memcpy.
    - false: T will be moved via move-constructors.
*/
template <typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>> class TArray {
public:
    using value_type = T;

    /**
     * Creates an empty array with no initial storage
     */
    TArray() : fOwnMemory(true), fCapacity{0} {}

    /**
     * Creates an empty array that will preallocate space for reserveCount elements.
     */
    explicit TArray(int reserveCount) : TArray() { this->reserve_exact(reserveCount); }

    /**
     * Copies one array to another. The new array will be heap allocated.
     */
    TArray(const TArray& that) : TArray(that.fData, that.fSize) {}

    TArray(TArray&& that) {
        if (that.fOwnMemory) {
            this->setData(that);
            that.setData({});
        } else {
            this->initData(that.fSize);
            that.move(fData);
        }
        fSize = std::exchange(that.fSize, 0);
    }

    /**
     * Creates a TArray by copying contents of a standard C array. The new
     * array will be heap allocated. Be careful not to use this constructor
     * when you really want the (void*, int) version.
     */
    TArray(const T* array, int count) {
        this->initData(count);
        this->copy(array);
    }

    /**
     * Creates a TArray by copying contents of an initializer list.
     */
    TArray(std::initializer_list<T> data) : TArray(data.begin(), data.size()) {}
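
    // Illustrative sketch of the constructors above (example only, not part of the header):
    //
    //   TArray<int> a;              // empty, no storage yet
    //   TArray<int> b(32);          // empty, but preallocates space for 32 ints
    //   TArray<int> c = {1, 2, 3};  // copies an initializer list
    //   int raw[] = {4, 5, 6};
    //   TArray<int> d(raw, 3);      // copies a C array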

    TArray& operator=(const TArray& that) {
        if (this == &that) {
            return *this;
        }
        this->clear();
        this->checkRealloc(that.size(), kExactFit);
        fSize = that.fSize;
        this->copy(that.fData);
        return *this;
    }
    TArray& operator=(TArray&& that) {
        if (this != &that) {
            this->clear();
            if (that.fOwnMemory) {
                // The storage is on the heap, so move the data pointer.
                if (fOwnMemory) {
                    sk_free(fData);
                }

                fData = std::exchange(that.fData, nullptr);

                // Can't use exchange with bitfields.
                fCapacity = that.fCapacity;
                that.fCapacity = 0;

                fOwnMemory = true;
            } else {
                // The data is stored inline in that, so move it element-by-element.
                this->checkRealloc(that.size(), kExactFit);
                that.move(fData);
            }
            fSize = std::exchange(that.fSize, 0);
        }
        return *this;
    }

    ~TArray() {
        this->destroyAll();
        if (fOwnMemory) {
            sk_free(fData);
        }
    }

    /**
     * Resets to size() = n newly constructed T objects and resets any reserve count.
     */
    void reset(int n) {
        SkASSERT(n >= 0);
        this->clear();
        this->checkRealloc(n, kExactFit);
        fSize = n;
        for (int i = 0; i < this->size(); ++i) {
            new (fData + i) T;
        }
    }

    /**
     * Resets to a copy of a C array and resets any reserve count.
     */
    void reset(const T* array, int count) {
        SkASSERT(count >= 0);
        this->clear();
        this->checkRealloc(count, kExactFit);
        fSize = count;
        this->copy(array);
    }

    /**
     * Ensures there is enough reserved space for at least n elements. This is guaranteed at least
     * until the array size grows above n and subsequently shrinks below n, any version of reset()
     * is called, or reserve() is called again.
     */
    void reserve(int n) {
        SkASSERT(n >= 0);
        if (n > this->size()) {
            this->checkRealloc(n - this->size(), kGrowing);
        }
    }

    /**
     * Ensures there is enough reserved space for exactly n elements. The same capacity guarantees
     * as above apply.
     */
    void reserve_exact(int n) {
        SkASSERT(n >= 0);
        if (n > this->size()) {
            this->checkRealloc(n - this->size(), kExactFit);
        }
    }
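
    // Illustrative sketch of the difference (example only, not part of the header):
    //
    //   TArray<int> a;
    //   a.reserve_exact(100);  // requests capacity for exactly 100 elements
    //   a.reserve(100);        // requests capacity for at least 100, allowing geometric growth
    //
    // In both cases the underlying allocator may round the block size up; the guarantee only
    // holds until the array grows past n and later shrinks below it, or reset()/reserve() is
    // called again (see the comments above).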

    void removeShuffle(int n) {
        SkASSERT(n < this->size());
        int newCount = fSize - 1;
        fSize = newCount;
        fData[n].~T();
        if (n != newCount) {
            this->move(n, newCount);
        }
    }
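
    // For illustration only (not part of the header): removeShuffle() does not preserve order;
    // it destroys element n and moves the last element into its slot.
    //
    //   TArray<int> a = {10, 20, 30, 40};
    //   a.removeShuffle(1);   // a is now {10, 40, 30}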

    // Is the array empty.
    bool empty() const { return fSize == 0; }

    /**
     * Adds 1 new default-initialized T value and returns it by reference. Note
     * the reference only remains valid until the next call that adds or removes
     * elements.
     */
    T& push_back() {
        void* newT = this->push_back_raw(1);
        return *new (newT) T;
    }

    /**
     * Version of above that uses a copy constructor to initialize the new item
     */
    T& push_back(const T& t) {
        void* newT = this->push_back_raw(1);
        return *new (newT) T(t);
    }

    /**
     * Version of above that uses a move constructor to initialize the new item
     */
    T& push_back(T&& t) {
        void* newT = this->push_back_raw(1);
        return *new (newT) T(std::move(t));
    }

    /**
     * Construct a new T at the back of this array.
     */
    template<class... Args> T& emplace_back(Args&&... args) {
        void* newT = this->push_back_raw(1);
        return *new (newT) T(std::forward<Args>(args)...);
    }
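
    // Example usage sketch (illustrative only, not part of the header):
    //
    //   TArray<std::pair<int, int>> pairs;
    //   pairs.push_back({1, 2});    // moves a temporary into the array
    //   pairs.emplace_back(3, 4);   // constructs in place from constructor arguments
    //
    // Note that push_back() with no arguments default-initializes the new T, so for primitive
    // element types the new value is indeterminate until assigned.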

    /**
     * Allocates n more default-initialized T values, and returns the address of
     * the start of that new range. Note: this address is only valid until the
     * next API call made on the array that might add or remove elements.
     */
    T* push_back_n(int n) {
        SkASSERT(n >= 0);
        T* newTs = TCast(this->push_back_raw(n));
        for (int i = 0; i < n; ++i) {
            new (&newTs[i]) T;
        }
        return newTs;
    }

    /**
     * Version of above that uses a copy constructor to initialize all n items
     * to the same T.
     */
    T* push_back_n(int n, const T& t) {
        SkASSERT(n >= 0);
        T* newTs = TCast(this->push_back_raw(n));
        for (int i = 0; i < n; ++i) {
            new (&newTs[i]) T(t);
        }
        return static_cast<T*>(newTs);
    }

    /**
     * Version of above that uses a copy constructor to initialize the n items
     * to separate T values.
     */
    T* push_back_n(int n, const T t[]) {
        SkASSERT(n >= 0);
        this->checkRealloc(n, kGrowing);
        T* end = this->end();
        for (int i = 0; i < n; ++i) {
            new (end + i) T(t[i]);
        }
        fSize += n;
        return end;
    }

    /**
     * Version of above that uses the move constructor to set n items.
     */
    T* move_back_n(int n, T* t) {
        SkASSERT(n >= 0);
        this->checkRealloc(n, kGrowing);
        T* end = this->end();
        for (int i = 0; i < n; ++i) {
            new (end + i) T(std::move(t[i]));
        }
        fSize += n;
        return end;
    }
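
    // Illustrative sketch of the bulk appends above (example only, not part of the header):
    //
    //   TArray<int> a;
    //   a.push_back_n(3, 7);      // appends {7, 7, 7}
    //   int more[] = {1, 2};
    //   a.push_back_n(2, more);   // appends {1, 2} by copying
    //   a.move_back_n(2, more);   // appends two more elements by moving from 'more'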

    /**
     * Removes the last element. Not safe to call when size() == 0.
     */
    void pop_back() {
        sk_collection_not_empty(this->empty());
        --fSize;
        fData[fSize].~T();
    }

    /**
     * Removes the last n elements. Not safe to call when size() < n.
     */
    void pop_back_n(int n) {
        SkASSERT(n >= 0);
        SkASSERT(this->size() >= n);
        int i = fSize;
        while (i-- > fSize - n) {
            (*this)[i].~T();
        }
        fSize -= n;
    }

    /**
     * Pushes or pops from the back to resize. Pushes will be default
     * initialized.
     */
    void resize_back(int newCount) {
        SkASSERT(newCount >= 0);

        if (newCount > this->size()) {
            this->push_back_n(newCount - fSize);
        } else if (newCount < this->size()) {
            this->pop_back_n(fSize - newCount);
        }
    }
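
    // Illustrative sketch (example only, not part of the header):
    //
    //   TArray<int> a = {1, 2, 3, 4, 5};
    //   a.resize_back(2);   // pops down to {1, 2}
    //   a.resize_back(4);   // grows back to size 4; the two new ints are default-initialized
    //                       // (indeterminate values for primitive types)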

    /** Swaps the contents of this array with that array. Does a pointer swap if possible,
        otherwise copies the T values. */
    void swap(TArray& that) {
        using std::swap;
        if (this == &that) {
            return;
        }
        if (fOwnMemory && that.fOwnMemory) {
            swap(fData, that.fData);
            swap(fSize, that.fSize);

            // Can't use swap because fCapacity is a bit field.
            auto allocCount = fCapacity;
            fCapacity = that.fCapacity;
            that.fCapacity = allocCount;
        } else {
            // This could be more optimal...
            TArray copy(std::move(that));
            that = std::move(*this);
            *this = std::move(copy);
        }
    }
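
    // Illustrative sketch (example only, not part of the header): when both arrays own heap
    // storage the swap is just a few field swaps; otherwise it falls back to element moves.
    //
    //   TArray<int> a = {1, 2, 3};
    //   TArray<int> b = {4};
    //   a.swap(b);    // a is {4}, b is {1, 2, 3}
    //   swap(a, b);   // the free function declared after this class does the same thing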

    T* begin() {
        return fData;
    }
    const T* begin() const {
        return fData;
    }

    // It's safe to use fData + fSize because if fData is nullptr then adding 0 is
    // valid and returns nullptr. See [expr.add] in the C++ standard.
    T* end() {
        if (fData == nullptr) {
            SkASSERT(fSize == 0);
        }
        return fData + fSize;
    }
    const T* end() const {
        if (fData == nullptr) {
            SkASSERT(fSize == 0);
        }
        return fData + fSize;
    }
    T* data() { return fData; }
    const T* data() const { return fData; }
    int size() const { return fSize; }
    size_t size_bytes() const { return this->bytes(fSize); }
    void resize(size_t count) { this->resize_back((int)count); }

    void clear() {
        this->destroyAll();
        fSize = 0;
    }

    void shrink_to_fit() {
        if (!fOwnMemory || fSize == fCapacity) {
            return;
        }
        if (fSize == 0) {
            sk_free(fData);
            fData = nullptr;
            fCapacity = 0;
        } else {
            SkSpan<std::byte> allocation = Allocate(fSize);
            this->move(TCast(allocation.data()));
            if (fOwnMemory) {
                sk_free(fData);
            }
            this->setDataFromBytes(allocation);
        }
    }
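
    // Illustrative sketch (example only, not part of the header):
    //
    //   TArray<int> a;
    //   a.reserve_exact(1000);   // heap block sized for 1000 ints
    //   a.push_back(1);
    //   a.shrink_to_fit();       // reallocates down to a block sized for the one live element
    //                            // (the allocator may still round the block size up)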

    /**
     * Get the i^th element.
     */
    T& operator[] (int i) {
        return fData[sk_collection_check_bounds(i, this->size())];
    }

    const T& operator[] (int i) const {
        return fData[sk_collection_check_bounds(i, this->size())];
    }

    T& at(int i) { return (*this)[i]; }
    const T& at(int i) const { return (*this)[i]; }

    /**
     * equivalent to operator[](0)
     */
    T& front() {
        sk_collection_not_empty(this->empty());
        return fData[0];
    }

    const T& front() const {
        sk_collection_not_empty(this->empty());
        return fData[0];
    }

    /**
     * equivalent to operator[](size() - 1)
     */
    T& back() {
        sk_collection_not_empty(this->empty());
        return fData[fSize - 1];
    }

    const T& back() const {
        sk_collection_not_empty(this->empty());
        return fData[fSize - 1];
    }

    /**
     * equivalent to operator[](size()-1-i)
     */
    T& fromBack(int i) {
        return (*this)[fSize - i - 1];
    }

    const T& fromBack(int i) const {
        return (*this)[fSize - i - 1];
    }
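
    // Illustrative sketch of the accessors above (example only, not part of the header):
    //
    //   TArray<int> a = {10, 20, 30};
    //   a.front();       // 10
    //   a.back();        // 30
    //   a.fromBack(0);   // 30, same as back()
    //   a.fromBack(2);   // 10, i.e. operator[](size() - 1 - 2)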

    bool operator==(const TArray<T, MEM_MOVE>& right) const {
        int leftCount = this->size();
        if (leftCount != right.size()) {
            return false;
        }
        for (int index = 0; index < leftCount; ++index) {
            if (fData[index] != right.fData[index]) {
                return false;
            }
        }
        return true;
    }

    bool operator!=(const TArray<T, MEM_MOVE>& right) const {
        return !(*this == right);
    }

    int capacity() const {
        return fCapacity;
    }

protected:
    // Creates an empty array that will use the passed storage block until it is insufficiently
    // large to hold the entire array.
    template <int InitialCapacity>
    TArray(SkAlignedSTStorage<InitialCapacity, T>* storage, int size = 0) {
        static_assert(InitialCapacity >= 0);
        SkASSERT(size >= 0);
        SkASSERT(storage->get() != nullptr);
        if (size > InitialCapacity) {
            this->initData(size);
        } else {
            this->setDataFromBytes(*storage);
            fSize = size;

            // setDataFromBytes always sets fOwnMemory to true, but we are actually using static
            // storage here, which shouldn't ever be freed.
            fOwnMemory = false;
        }
    }

    // Copy a C array, using the pre-allocated storage if InitialCapacity >= size. Otherwise,
    // storage will only be used when array shrinks to fit.
    template <int InitialCapacity>
    TArray(const T* array, int size, SkAlignedSTStorage<InitialCapacity, T>* storage)
        : TArray{storage, size}
    {
        this->copy(array);
    }

private:
    // Growth factors for checkRealloc.
    static constexpr double kExactFit = 1.0;
    static constexpr double kGrowing = 1.5;
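    // Illustrative note (not from the original header): kExactFit is passed by reserve_exact(),
    // reset(), and the assignment operators so the allocation matches the request, while
    // kGrowing is passed by reserve() and the push_back()/append paths so repeated appends
    // reallocate geometrically (roughly 1.5x per growth step, with the exact rounding left to
    // SkContainerAllocator).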

    static constexpr int kMinHeapAllocCount = 8;
    static_assert(SkIsPow2(kMinHeapAllocCount), "min alloc count not power of two.");

    // Note for 32-bit machines kMaxCapacity will be <= SIZE_MAX. For 64-bit machines it will
    // just be INT_MAX if the sizeof(T) < 2^32.
    static constexpr int kMaxCapacity = SkToInt(std::min(SIZE_MAX / sizeof(T), (size_t)INT_MAX));

    void setDataFromBytes(SkSpan<std::byte> allocation) {
        T* data = TCast(allocation.data());
        // We may have gotten extra bytes back from the allocation, so pin to kMaxCapacity. It
        // would seem like the SkContainerAllocator should handle the divide, but it would have
        // to use a full divide instruction. If done here the size is known at compile time, and
        // usually can be implemented by a right shift. The full divide takes ~50X longer than
        // the shift.
        size_t size = std::min(allocation.size() / sizeof(T), SkToSizeT(kMaxCapacity));
        setData(SkSpan<T>(data, size));
    }

    void setData(SkSpan<T> array) {
        fData = array.data();
        fCapacity = SkToU32(array.size());
        fOwnMemory = true;
    }

    // We disable Control-Flow Integrity sanitization (go/cfi) when casting item-array buffers.
    // CFI flags this code as dangerous because we are casting `buffer` to a T* while the buffer's
    // contents might still be uninitialized memory. When T has a vtable, this is especially risky
    // because we could hypothetically access a virtual method on fData and jump to an
    // unpredictable location in memory. Of course, TArray won't actually use fData in this
    // way, and we don't want to construct a T before the user requests one. There's no real risk
    // here, so disable CFI when doing these casts.
    SK_CLANG_NO_SANITIZE("cfi")
    static T* TCast(void* buffer) {
        return (T*)buffer;
    }

    size_t bytes(int n) const {
        SkASSERT(n <= kMaxCapacity);
        return SkToSizeT(n) * sizeof(T);
    }

    static SkSpan<std::byte> Allocate(int capacity, double growthFactor = 1.0) {
        return SkContainerAllocator{sizeof(T), kMaxCapacity}.allocate(capacity, growthFactor);
    }

    void initData(int count) {
        this->setDataFromBytes(Allocate(count));
        fSize = count;
    }

    void destroyAll() {
        if (!this->empty()) {
            T* cursor = this->begin();
            T* const end = this->end();
            do {
                cursor->~T();
                cursor++;
            } while (cursor < end);
        }
    }

    /** In the following move and copy methods, 'dst' is assumed to be uninitialized raw storage.
     * In the following move methods, 'src' is destroyed leaving behind uninitialized raw storage.
     */
    void copy(const T* src) {
        if constexpr (std::is_trivially_copyable_v<T>) {
            if (!this->empty() && src != nullptr) {
                sk_careful_memcpy(fData, src, this->size_bytes());
            }
        } else {
            for (int i = 0; i < this->size(); ++i) {
                new (fData + i) T(src[i]);
            }
        }
    }

    void move(int dst, int src) {
        if constexpr (MEM_MOVE) {
            memcpy(static_cast<void*>(&fData[dst]),
                   static_cast<const void*>(&fData[src]),
                   sizeof(T));
        } else {
            new (&fData[dst]) T(std::move(fData[src]));
            fData[src].~T();
        }
    }

    void move(void* dst) {
        if constexpr (MEM_MOVE) {
            sk_careful_memcpy(dst, fData, this->bytes(fSize));
        } else {
            for (int i = 0; i < this->size(); ++i) {
                new (static_cast<char*>(dst) + this->bytes(i)) T(std::move(fData[i]));
                fData[i].~T();
            }
        }
    }

    // Helper function that makes space for n objects, adjusts the count, but does not initialize
    // the new objects.
    void* push_back_raw(int n) {
        this->checkRealloc(n, kGrowing);
        void* ptr = fData + fSize;
        fSize += n;
        return ptr;
    }

    void checkRealloc(int delta, double growthFactor) {
        // This constant needs to be declared in the function where it is used to work around
        // MSVC's persnickety nature about template definitions.
        SkASSERT(delta >= 0);
        SkASSERT(fSize >= 0);
        SkASSERT(fCapacity >= 0);

        // Return if there are enough remaining allocated elements to satisfy the request.
        if (this->capacity() - fSize >= delta) {
            return;
        }

        // Don't overflow fSize or size_t later in the memory allocation. Overflowing memory
        // allocation really only applies to fSize on 32-bit machines; on 64-bit machines this
        // will probably never produce a check. Since kMaxCapacity is bounded above by INT_MAX,
        // this also checks the bounds of fSize.
        if (delta > kMaxCapacity - fSize) {
            sk_report_container_overflow_and_die();
        }
        const int newCount = fSize + delta;

        SkSpan<std::byte> allocation = Allocate(newCount, growthFactor);

        this->move(TCast(allocation.data()));
        if (fOwnMemory) {
            sk_free(fData);
        }
        this->setDataFromBytes(allocation);
        SkASSERT(this->capacity() >= newCount);
        SkASSERT(fData != nullptr);
    }

    T* fData{nullptr};
    int fSize{0};
    uint32_t fOwnMemory : 1;
    uint32_t fCapacity : 31;
};

template <typename T, bool M> static inline void swap(TArray<T, M>& a, TArray<T, M>& b) {
    a.swap(b);
}

// Subclass of TArray that contains a pre-allocated memory block for the array.
template <int N, typename T, bool MEM_MOVE = sk_is_trivially_relocatable_v<T>>
class STArray : private SkAlignedSTStorage<N,T>, public TArray<T, MEM_MOVE> {
    static_assert(N > 0);
    using Storage = SkAlignedSTStorage<N,T>;

public:
    STArray()
        : Storage{}
        , TArray<T, MEM_MOVE>(this) {}  // Must use () to avoid confusion with initializer_list
                                        // when T=bool because pointers are convertible to bool.

    STArray(const T* array, int count)
        : Storage{}
        , TArray<T, MEM_MOVE>{array, count, this} {}

    STArray(std::initializer_list<T> data)
        : STArray{data.begin(), SkToInt(data.size())} {}

    explicit STArray(int reserveCount)
        : STArray() { this->reserve_exact(reserveCount); }

    STArray(const STArray& that)
        : STArray() { *this = that; }

    explicit STArray(const TArray<T, MEM_MOVE>& that)
        : STArray() { *this = that; }

    STArray(STArray&& that)
        : STArray() { *this = std::move(that); }

    explicit STArray(TArray<T, MEM_MOVE>&& that)
        : STArray() { *this = std::move(that); }

    STArray& operator=(const STArray& that) {
        TArray<T, MEM_MOVE>::operator=(that);
        return *this;
    }

    STArray& operator=(const TArray<T, MEM_MOVE>& that) {
        TArray<T, MEM_MOVE>::operator=(that);
        return *this;
    }

    STArray& operator=(STArray&& that) {
        TArray<T, MEM_MOVE>::operator=(std::move(that));
        return *this;
    }

    STArray& operator=(TArray<T, MEM_MOVE>&& that) {
        TArray<T, MEM_MOVE>::operator=(std::move(that));
        return *this;
    }

    // Force the use of TArray for data() and size().
    using TArray<T, MEM_MOVE>::data;
    using TArray<T, MEM_MOVE>::size;
};
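
// Illustrative sketch (example only, not part of the header): STArray keeps the first N
// elements in an inline storage block, so small arrays avoid a heap allocation entirely.
//
//   STArray<4, int> a = {1, 2, 3};   // lives in the inline storage, no heap allocation
//   a.push_back(4);                  // still inline (size == N == 4)
//   a.push_back(5);                  // spills to a heap allocation once it outgrows N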
} // namespace skia_private
#endif // SkTArray_DEFINED