/*
 * Copyright 2006 The Android Open Source Project
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkRefCnt_DEFINED
#define SkRefCnt_DEFINED

#include "include/core/SkTypes.h"
#include "include/private/base/SkDebug.h"

#include <atomic>
#include <cstddef>
#include <cstdint>
#include <iosfwd>
#include <type_traits>
#include <utility>

/** \class SkRefCntBase

    SkRefCntBase is the base class for objects that may be shared by multiple
    objects. When an existing owner wants to share a reference, it calls ref().
    When an owner wants to release its reference, it calls unref(). When the
    shared object's reference count goes to zero as the result of an unref()
    call, its (virtual) destructor is called. It is an error for the
    destructor to be called explicitly (or via the object going out of scope on
    the stack or calling delete) if getRefCnt() > 1.
*/
class SK_API SkRefCntBase {
public:
    /** Default construct, initializing the reference count to 1.
    */
    SkRefCntBase() : fRefCnt(1) {}

    /** Destruct, asserting that the reference count is 1.
    */
    virtual ~SkRefCntBase() {
    #ifdef SK_DEBUG
        SkASSERTF(this->getRefCnt() == 1, "fRefCnt was %d", this->getRefCnt());
        // illegal value, to catch us if we reuse after delete
        fRefCnt.store(0, std::memory_order_relaxed);
    #endif
    }

    /** May return true if the caller is the only owner.
     *  Ensures that all previous owner's actions are complete.
     */
    bool unique() const {
        if (1 == fRefCnt.load(std::memory_order_acquire)) {
            // The acquire barrier is only really needed if we return true. It
            // prevents code conditioned on the result of unique() from running
            // until previous owners are all totally done calling unref().
            return true;
        }
        return false;
    }

    /** Increment the reference count. Must be balanced by a call to unref().
    */
    void ref() const {
        SkASSERT(this->getRefCnt() > 0);
        // No barrier required.
        (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed);
    }

    /** Decrement the reference count. If the reference count is 1 before the
        decrement, then delete the object. Note that if this is the case, then
        the object needs to have been allocated via new, and not on the stack.
    */
    void unref() const {
        SkASSERT(this->getRefCnt() > 0);
        // A release here acts in place of all releases we "should" have been doing in ref().
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // Like unique(), the acquire is only needed on success, to make sure
            // code in internal_dispose() doesn't happen before the decrement.
            this->internal_dispose();
        }
    }

private:

#ifdef SK_DEBUG
    /** Return the reference count. Use only for debugging. */
    int32_t getRefCnt() const {
        return fRefCnt.load(std::memory_order_relaxed);
    }
#endif

    /**
     *  Called when the ref count goes to 0.
     */
    virtual void internal_dispose() const {
    #ifdef SK_DEBUG
        SkASSERT(0 == this->getRefCnt());
        fRefCnt.store(1, std::memory_order_relaxed);
    #endif
        delete this;
    }

    // The following friends are those which override internal_dispose()
    // and conditionally call SkRefCnt::internal_dispose().
    friend class SkWeakRefCnt;

    mutable std::atomic<int32_t> fRefCnt;

    SkRefCntBase(SkRefCntBase&&) = delete;
    SkRefCntBase(const SkRefCntBase&) = delete;
    SkRefCntBase& operator=(SkRefCntBase&&) = delete;
    SkRefCntBase& operator=(const SkRefCntBase&) = delete;
};

#ifdef SK_REF_CNT_MIXIN_INCLUDE
// It is the responsibility of the following include to define the type SkRefCnt.
// This SkRefCnt should normally derive from SkRefCntBase.
#include SK_REF_CNT_MIXIN_INCLUDE
#else
class SK_API SkRefCnt : public SkRefCntBase {
    // "#include SK_REF_CNT_MIXIN_INCLUDE" doesn't work with this build system.
    #if defined(SK_BUILD_FOR_GOOGLE3)
public:
    void deref() const { this->unref(); }
    #endif
};
#endif
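
// Illustrative sketch (not part of this header's API): a hypothetical class that
// derives from SkRefCnt and is managed with manual ref()/unref() calls. The class
// name MyResource is made up for the example.
//
//   class MyResource : public SkRefCnt {
//   public:
//       void use() const {}
//   };
//
//   MyResource* res = new MyResource;  // starts with a reference count of 1
//   res->ref();                        // count == 2: a second owner shares it
//   res->use();
//   res->unref();                      // count == 1: second owner releases
//   res->unref();                      // count == 0: destructor runs, object deleted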

///////////////////////////////////////////////////////////////////////////////

/** Call obj->ref() and return obj. The obj must not be nullptr.
 */
template <typename T> static inline T* SkRef(T* obj) {
    SkASSERT(obj);
    obj->ref();
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->ref() and return obj.
 */
template <typename T> static inline T* SkSafeRef(T* obj) {
    if (obj) {
        obj->ref();
    }
    return obj;
}

/** Check if the argument is non-null, and if so, call obj->unref()
 */
template <typename T> static inline void SkSafeUnref(T* obj) {
    if (obj) {
        obj->unref();
    }
}
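
// Illustrative sketch of the helpers above, assuming a ref-counted type named
// MyResource (hypothetical). SkRef() asserts on null; the SkSafe* variants
// tolerate null pointers.
//
//   MyResource* a = new MyResource;              // count == 1
//   MyResource* b = SkRef(a);                    // count == 2, b == a
//   MyResource* c = SkSafeRef(b);                // count == 3 (null would pass through)
//   SkSafeUnref(c);                              // count == 2
//   SkSafeUnref(b);                              // count == 1
//   SkSafeUnref(a);                              // count == 0, object deleted
//   SkSafeUnref(static_cast<MyResource*>(nullptr));  // safe no-op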

///////////////////////////////////////////////////////////////////////////////

// This is a variant of SkRefCnt that's Not Virtual, so weighs 4 bytes instead of 8 or 16.
// There's only benefit to using this if the deriving class does not otherwise need a vtable.
template <typename Derived>
class SkNVRefCnt {
public:
    SkNVRefCnt() : fRefCnt(1) {}
    ~SkNVRefCnt() {
    #ifdef SK_DEBUG
        int rc = fRefCnt.load(std::memory_order_relaxed);
        SkASSERTF(rc == 1, "NVRefCnt was %d", rc);
    #endif
    }

    // Implementation is pretty much the same as SkRefCntBase. All required barriers are the same:
    //   - unique() needs acquire when it returns true, and no barrier if it returns false;
    //   - ref() doesn't need any barrier;
    //   - unref() needs a release barrier, and an acquire if it's going to call delete.

    bool unique() const { return 1 == fRefCnt.load(std::memory_order_acquire); }
    void ref() const { (void)fRefCnt.fetch_add(+1, std::memory_order_relaxed); }
    void unref() const {
        if (1 == fRefCnt.fetch_add(-1, std::memory_order_acq_rel)) {
            // restore the 1 for our destructor's assert
            SkDEBUGCODE(fRefCnt.store(1, std::memory_order_relaxed));
            delete (const Derived*)this;
        }
    }
    void deref() const { this->unref(); }

    // This must be used with caution. It is only valid to call this when 'threadIsolatedTestCnt'
    // refs are known to be isolated to the current thread. That is, it is known that there are at
    // least 'threadIsolatedTestCnt' refs for which no other thread may make a balancing unref()
    // call. Assuming the contract is followed, if this returns false then no other thread has
    // ownership of this. If it returns true then another thread *may* have ownership.
    bool refCntGreaterThan(int32_t threadIsolatedTestCnt) const {
        int cnt = fRefCnt.load(std::memory_order_acquire);
        // If this fails then the above contract has been violated.
        SkASSERT(cnt >= threadIsolatedTestCnt);
        return cnt > threadIsolatedTestCnt;
    }

private:
    mutable std::atomic<int32_t> fRefCnt;

    SkNVRefCnt(SkNVRefCnt&&) = delete;
    SkNVRefCnt(const SkNVRefCnt&) = delete;
    SkNVRefCnt& operator=(SkNVRefCnt&&) = delete;
    SkNVRefCnt& operator=(const SkNVRefCnt&) = delete;
};
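
// Illustrative sketch of the CRTP pattern SkNVRefCnt expects: the deriving class
// passes itself as the template argument so unref() can delete the correct type
// without a virtual destructor. MySmallThing is a made-up name for the example.
//
//   class MySmallThing : public SkNVRefCnt<MySmallThing> {
//   public:
//       int fValue = 0;
//   };
//
//   MySmallThing* thing = new MySmallThing;  // count == 1, no vtable pointer needed
//   thing->ref();                            // count == 2
//   thing->unref();                          // count == 1
//   thing->unref();                          // count == 0, deleted as MySmallThing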

///////////////////////////////////////////////////////////////////////////////////////////////////

/**
 *  Shared pointer class to wrap classes that support a ref()/unref() interface.
 *
 *  This can be used for classes inheriting from SkRefCnt, but it also works for other
 *  classes that match the interface, but have different internal choices: e.g. the hosted class
 *  may have its ref/unref be thread-safe, but that is not assumed/imposed by sk_sp.
 *
 *  Declared with the trivial_abi attribute where supported so that sk_sp and types containing it
 *  may be considered as trivially relocatable by the compiler, so that destroying-move operations,
 *  i.e. a move constructor followed by a destructor, can be optimized to memcpy.
 */
template <typename T> class SK_TRIVIAL_ABI sk_sp {
public:
    using element_type = T;

    constexpr sk_sp() : fPtr(nullptr) {}
    constexpr sk_sp(std::nullptr_t) : fPtr(nullptr) {}

    /**
     *  Shares the underlying object by calling ref(), so that the argument and the newly
     *  created sk_sp both have a reference to it.
     */
    sk_sp(const sk_sp<T>& that) : fPtr(SkSafeRef(that.get())) {}
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp(const sk_sp<U>& that) : fPtr(SkSafeRef(that.get())) {}

    /**
     *  Move the underlying object from the argument to the newly created sk_sp. Afterwards only
     *  the new sk_sp will have a reference to the object, and the argument will point to null.
     *  No call to ref() or unref() will be made.
     */
    sk_sp(sk_sp<T>&& that) : fPtr(that.release()) {}
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp(sk_sp<U>&& that) : fPtr(that.release()) {}

    /**
     *  Adopt the bare pointer into the newly created sk_sp.
     *  No call to ref() or unref() will be made.
     */
    explicit sk_sp(T* obj) : fPtr(obj) {}

    /**
     *  Calls unref() on the underlying object pointer.
     */
    ~sk_sp() {
        SkSafeUnref(fPtr);
        SkDEBUGCODE(fPtr = nullptr);
    }

    sk_sp<T>& operator=(std::nullptr_t) { this->reset(); return *this; }

    /**
     *  Shares the underlying object referenced by the argument by calling ref() on it. If this
     *  sk_sp previously had a reference to an object (i.e. not null) it will call unref() on that
     *  object.
     */
    sk_sp<T>& operator=(const sk_sp<T>& that) {
        if (this != &that) {
            this->reset(SkSafeRef(that.get()));
        }
        return *this;
    }
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp<T>& operator=(const sk_sp<U>& that) {
        this->reset(SkSafeRef(that.get()));
        return *this;
    }

    /**
     *  Move the underlying object from the argument to the sk_sp. If the sk_sp previously held
     *  a reference to another object, unref() will be called on that object. No call to ref()
     *  will be made.
     */
    sk_sp<T>& operator=(sk_sp<T>&& that) {
        this->reset(that.release());
        return *this;
    }
    template <typename U,
              typename = typename std::enable_if<std::is_convertible<U*, T*>::value>::type>
    sk_sp<T>& operator=(sk_sp<U>&& that) {
        this->reset(that.release());
        return *this;
    }

    T& operator*() const {
        SkASSERT(this->get() != nullptr);
        return *this->get();
    }

    explicit operator bool() const { return this->get() != nullptr; }

    T* get() const { return fPtr; }
    T* operator->() const { return fPtr; }

    /**
     *  Adopt the new bare pointer, and call unref() on any previously held object (if not null).
     *  No call to ref() will be made.
     */
    void reset(T* ptr = nullptr) {
        // Calling fPtr->unref() may call this->~() or this->reset(T*).
        // http://wg21.cmeerw.net/lwg/issue998
        // http://wg21.cmeerw.net/lwg/issue2262
        T* oldPtr = fPtr;
        fPtr = ptr;
        SkSafeUnref(oldPtr);
    }

    /**
     *  Return the bare pointer, and set the internal object pointer to nullptr.
     *  The caller must assume ownership of the object, and manage its reference count directly.
     *  No call to unref() will be made.
     */
    [[nodiscard]] T* release() {
        T* ptr = fPtr;
        fPtr = nullptr;
        return ptr;
    }

    void swap(sk_sp<T>& that) /*noexcept*/ {
        using std::swap;
        swap(fPtr, that.fPtr);
    }

    using sk_is_trivially_relocatable = std::true_type;

private:
    T* fPtr;
};
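
// Illustrative sketch of sk_sp ownership semantics, assuming a hypothetical
// ref-counted type MyResource (anything deriving from SkRefCnt would do).
//
//   sk_sp<MyResource> a(new MyResource);  // adopts the initial reference, count == 1
//   sk_sp<MyResource> b = a;              // copy shares: ref() called, count == 2
//   sk_sp<MyResource> c = std::move(a);   // move transfers: no ref()/unref(), a is null
//   b.reset();                            // unref() called, count == 1
//   MyResource* raw = c.release();        // c gives up ownership without unref()
//   SkSafeUnref(raw);                     // caller must balance the remaining reference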

template <typename T> inline void swap(sk_sp<T>& a, sk_sp<T>& b) /*noexcept*/ {
    a.swap(b);
}

template <typename T, typename U> inline bool operator==(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() == b.get();
}
template <typename T> inline bool operator==(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return !a;
}
template <typename T> inline bool operator==(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return !b;
}

template <typename T, typename U> inline bool operator!=(const sk_sp<T>& a, const sk_sp<U>& b) {
    return a.get() != b.get();
}
template <typename T> inline bool operator!=(const sk_sp<T>& a, std::nullptr_t) /*noexcept*/ {
    return static_cast<bool>(a);
}
template <typename T> inline bool operator!=(std::nullptr_t, const sk_sp<T>& b) /*noexcept*/ {
    return static_cast<bool>(b);
}

template <typename C, typename CT, typename T>
auto operator<<(std::basic_ostream<C, CT>& os, const sk_sp<T>& sp) -> decltype(os << sp.get()) {
    return os << sp.get();
}

template <typename T, typename... Args>
sk_sp<T> sk_make_sp(Args&&... args) {
    return sk_sp<T>(new T(std::forward<Args>(args)...));
}
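
// Illustrative sketch: sk_make_sp forwards its arguments to T's constructor and
// adopts the new object's initial reference. MyShape and its constructor
// arguments are hypothetical.
//
//   sk_sp<MyShape> shape = sk_make_sp<MyShape>(/*width=*/4, /*height=*/3);
//   // equivalent to: sk_sp<MyShape> shape(new MyShape(4, 3));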

/*
 *  Returns a sk_sp wrapping the provided ptr AND calls ref on it (if not null).
 *
 *  This is different from the semantics of the constructor for sk_sp, which just wraps the ptr,
 *  effectively "adopting" it.
 */
template <typename T> sk_sp<T> sk_ref_sp(T* obj) {
    return sk_sp<T>(SkSafeRef(obj));
}

template <typename T> sk_sp<T> sk_ref_sp(const T* obj) {
    return sk_sp<T>(const_cast<T*>(SkSafeRef(obj)));
}
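
// Illustrative sketch of the difference between sk_ref_sp() and the adopting
// sk_sp(T*) constructor, using a hypothetical bare pointer 'borrowed' whose
// reference is owned by someone else.
//
//   sk_sp<MyResource> shared = sk_ref_sp(borrowed);  // ref() called: count goes up by one
//   sk_sp<MyResource> adopted(borrowed);             // no ref(): takes over the existing count
//
// Use sk_ref_sp() when the caller is only borrowing the pointer; use the
// constructor when the caller is handing off a reference it already owns.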

#endif