1 | /* |
2 | * Copyright 2020 Google Inc. |
3 | * |
4 | * Use of this source code is governed by a BSD-style license that can be |
5 | * found in the LICENSE file. |
6 | */ |
7 | |
8 | #ifndef GrDirectContext_DEFINED |
9 | #define GrDirectContext_DEFINED |
10 | |
11 | #include "include/core/SkColor.h" |
12 | #include "include/core/SkRefCnt.h" |
13 | #include "include/core/SkTypes.h" |
14 | #include "include/gpu/GpuTypes.h" |
15 | #include "include/gpu/GrContextOptions.h" |
16 | #include "include/gpu/GrRecordingContext.h" |
17 | #include "include/gpu/GrTypes.h" |
18 | |
19 | #include <chrono> |
20 | #include <cstddef> |
21 | #include <cstdint> |
22 | #include <memory> |
23 | #include <string_view> |
24 | |
25 | class GrAtlasManager; |
26 | class GrBackendSemaphore; |
27 | class GrBackendFormat; |
28 | class GrBackendTexture; |
29 | class GrBackendRenderTarget; |
30 | class GrClientMappedBufferManager; |
31 | class GrContextThreadSafeProxy; |
32 | class GrDirectContextPriv; |
33 | class GrGpu; |
34 | class GrResourceCache; |
35 | class GrResourceProvider; |
36 | class SkData; |
37 | class SkImage; |
38 | class SkPixmap; |
39 | class SkSurface; |
40 | class SkTaskGroup; |
41 | class SkTraceMemoryDump; |
42 | enum SkColorType : int; |
43 | enum class SkTextureCompressionType; |
44 | struct GrGLInterface; |
45 | struct GrMockOptions; |
46 | struct GrVkBackendContext; // IWYU pragma: keep |
47 | struct GrD3DBackendContext; // IWYU pragma: keep |
48 | struct GrMtlBackendContext; // IWYU pragma: keep |
49 | |
50 | namespace skgpu { |
51 | class MutableTextureState; |
52 | #if !defined(SK_ENABLE_OPTIMIZE_SIZE) |
53 | namespace ganesh { class SmallPathAtlasMgr; } |
54 | #endif |
55 | } |
56 | namespace sktext { namespace gpu { class StrikeCache; } } |
57 | namespace wgpu { class Device; } // IWYU pragma: keep |
58 | |
59 | namespace SkSurfaces { |
60 | enum class BackendSurfaceAccess; |
61 | } |
62 | |
63 | class SK_API GrDirectContext : public GrRecordingContext { |
64 | public: |
65 | #ifdef SK_GL |
66 | /** |
67 | * Creates a GrDirectContext for a backend context. If no GrGLInterface is provided then the |
68 | * result of GrGLMakeNativeInterface() is used if it succeeds. |
69 | */ |
70 | static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>, const GrContextOptions&); |
71 | static sk_sp<GrDirectContext> MakeGL(sk_sp<const GrGLInterface>); |
72 | static sk_sp<GrDirectContext> MakeGL(const GrContextOptions&); |
73 | static sk_sp<GrDirectContext> MakeGL(); |
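
    // A minimal illustrative sketch (not a prescribed usage): create a GL-backed context once a
    // native GL context is current on this thread. GrGLMakeNativeInterface() comes from Skia's
    // GL headers; whether it can produce a usable interface is platform-dependent.
    //
    //   sk_sp<const GrGLInterface> iface = GrGLMakeNativeInterface();
    //   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeGL(iface);
    //   if (!ctx) {
    //       // No usable GL interface/context; fall back to CPU (raster) rendering.
    //   }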
74 | #endif |
75 | |
76 | #ifdef SK_VULKAN |
77 | /** |
78 | * The Vulkan context (VkQueue, VkDevice, VkInstance) must be kept alive until the returned |
79 | * GrDirectContext is destroyed. This also means that any objects created with this |
80 | * GrDirectContext (e.g. SkSurfaces, SkImages, etc.) must also be released as they may hold |
81 | * refs on the GrDirectContext. Once all these objects and the GrDirectContext are released, |
82 | * then it is safe to delete the vulkan objects. |
83 | */ |
84 | static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&, const GrContextOptions&); |
85 | static sk_sp<GrDirectContext> MakeVulkan(const GrVkBackendContext&); |
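
    // Illustrative sketch only, assuming the client has already created the Vulkan instance,
    // device, and queue. The field names below follow GrVkBackendContext; myGetProc is a
    // hypothetical resolver for instance- and device-level Vulkan functions.
    //
    //   GrVkBackendContext backendContext;
    //   backendContext.fInstance           = instance;
    //   backendContext.fPhysicalDevice     = physicalDevice;
    //   backendContext.fDevice             = device;
    //   backendContext.fQueue              = graphicsQueue;
    //   backendContext.fGraphicsQueueIndex = graphicsQueueFamilyIndex;
    //   backendContext.fGetProc            = myGetProc;
    //   sk_sp<GrDirectContext> ctx = GrDirectContext::MakeVulkan(backendContext);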
86 | #endif |
87 | |
88 | #ifdef SK_METAL |
89 | /** |
90 | * Makes a GrDirectContext which uses Metal as the backend. The GrMtlBackendContext contains a |
91 | * MTLDevice and MTLCommandQueue which should be used by the backend. These objects must |
92 | * have their own ref which will be released when the GrMtlBackendContext is destroyed. |
93 | * Ganesh will take its own ref on the objects which will be released when the GrDirectContext |
94 | * is destroyed. |
95 | */ |
96 | static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&, const GrContextOptions&); |
97 | static sk_sp<GrDirectContext> MakeMetal(const GrMtlBackendContext&); |
98 | /** |
99 | * Deprecated. |
100 | * |
101 | * Makes a GrDirectContext which uses Metal as the backend. The device parameter is an |
102 | * MTLDevice and queue is an MTLCommandQueue which should be used by the backend. These objects |
103 | * must have a ref on them that can be transferred to Ganesh, which will release the ref |
104 | * when the GrDirectContext is destroyed. |
105 | */ |
106 | static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue, const GrContextOptions&); |
107 | static sk_sp<GrDirectContext> MakeMetal(void* device, void* queue); |
108 | #endif |
109 | |
110 | #ifdef SK_DIRECT3D |
111 | /** |
112 | * Makes a GrDirectContext which uses Direct3D as the backend. The Direct3D context |
113 | * must be kept alive until the returned GrDirectContext is first destroyed or abandoned. |
114 | */ |
115 | static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&, const GrContextOptions&); |
116 | static sk_sp<GrDirectContext> MakeDirect3D(const GrD3DBackendContext&); |
117 | #endif |
118 | |
119 | #ifdef SK_DAWN |
120 | static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&, |
121 | const GrContextOptions&); |
122 | static sk_sp<GrDirectContext> MakeDawn(const wgpu::Device&); |
123 | #endif |
124 | |
125 | static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*, const GrContextOptions&); |
126 | static sk_sp<GrDirectContext> MakeMock(const GrMockOptions*); |
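
    // Illustrative sketch: a mock context exercises Ganesh code paths without a real GPU, which
    // is mainly useful in unit tests. Passing nullptr selects default mock options.
    //
    //   sk_sp<GrDirectContext> mockCtx = GrDirectContext::MakeMock(nullptr);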
127 | |
128 | ~GrDirectContext() override; |
129 | |
130 | /** |
131 | * The context normally assumes that no outsider is setting state |
132 | * within the underlying 3D API's context/device/whatever. This call informs |
     * the context that the state was modified externally and that it should re-send that
     * state the next time it renders. This shouldn't be called frequently, for performance.
     * Which bits of 'state' are meaningful depends on which backend is used by the
     * context, either GL or D3D (possible in future).
137 | */ |
138 | void resetContext(uint32_t state = kAll_GrBackendState); |
139 | |
140 | /** |
141 | * If the backend is GrBackendApi::kOpenGL, then all texture unit/target combinations for which |
142 | * the context has modified the bound texture will have texture id 0 bound. This does not |
143 | * flush the context. Calling resetContext() does not change the set that will be bound |
144 | * to texture id 0 on the next call to resetGLTextureBindings(). After this is called |
145 | * all unit/target combinations are considered to have unmodified bindings until the context |
146 | * subsequently modifies them (meaning if this is called twice in a row with no intervening |
147 | * context usage then the second call is a no-op.) |
148 | */ |
149 | void resetGLTextureBindings(); |
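
    // Illustrative sketch, assuming a GL backend shared with non-Skia rendering code; the raw GL
    // drawing call is hypothetical.
    //
    //   drawWithRawGL();                          // external code that changes GL state
    //   ctx->resetContext(kAll_GrBackendState);   // tell Skia its cached GL state is stale
    //   ctx->resetGLTextureBindings();            // and forget which textures Skia left bound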
150 | |
151 | /** |
152 | * Abandons all GPU resources and assumes the underlying backend 3D API context is no longer |
153 | * usable. Call this if you have lost the associated GPU context, and thus internal texture, |
154 | * buffer, etc. references/IDs are now invalid. Calling this ensures that the destructors of the |
155 | * context and any of its created resource objects will not make backend 3D API calls. Content |
156 | * rendered but not previously flushed may be lost. After this function is called all subsequent |
157 | * calls on the context will fail or be no-ops. |
158 | * |
159 | * The typical use case for this function is that the underlying 3D context was lost and further |
160 | * API calls may crash. |
161 | * |
162 | * This call is not valid to be made inside ReleaseProcs passed into SkSurface or SkImages. The |
163 | * call will simply fail (and assert in debug) if it is called while inside a ReleaseProc. |
164 | * |
165 | * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to |
166 | * create the context must be kept alive even after abandoning the context. Those objects must |
167 | * live for the lifetime of the context object itself. The reason for this is so that |
168 | * we can continue to delete any outstanding GrBackendTextures/RenderTargets which must be |
169 | * cleaned up even in a device lost state. |
170 | */ |
171 | void abandonContext() override; |
172 | |
173 | /** |
     * Returns true if the context was abandoned or if the backend-specific context has
     * gotten into an unrecoverable, lost state (e.g. in the Vulkan backend if we've gotten a
176 | * VK_ERROR_DEVICE_LOST). If the backend context is lost, this call will also abandon this |
177 | * context. |
178 | */ |
179 | bool abandoned() override; |
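
    // Illustrative sketch of reacting to a lost device (e.g. after VK_ERROR_DEVICE_LOST):
    //
    //   if (ctx->abandoned()) {
    //       // Drop all SkSurfaces/SkImages created with ctx, then recreate the backend 3D
    //       // context and a fresh GrDirectContext before rendering again.
    //   }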
180 | |
181 | // TODO: Remove this from public after migrating Chrome. |
182 | sk_sp<GrContextThreadSafeProxy> threadSafeProxy(); |
183 | |
184 | /** |
185 | * Checks if the underlying 3D API reported an out-of-memory error. If this returns true it is |
186 | * reset and will return false until another out-of-memory error is reported by the 3D API. If |
187 | * the context is abandoned then this will report false. |
188 | * |
189 | * Currently this is implemented for: |
190 | * |
191 | * OpenGL [ES] - Note that client calls to glGetError() may swallow GL_OUT_OF_MEMORY errors and |
192 | * therefore hide the error from Skia. Also, it is not advised to use this in combination with |
193 | * enabling GrContextOptions::fSkipGLErrorChecks. That option may prevent the context from ever |
194 | * checking the GL context for OOM. |
195 | * |
196 | * Vulkan - Reports true if VK_ERROR_OUT_OF_HOST_MEMORY or VK_ERROR_OUT_OF_DEVICE_MEMORY has |
197 | * occurred. |
198 | */ |
199 | bool oomed(); |
200 | |
201 | /** |
202 | * This is similar to abandonContext() however the underlying 3D context is not yet lost and |
     * the context will clean up all allocated resources before returning. After returning it will
204 | * assume that the underlying context may no longer be valid. |
205 | * |
206 | * The typical use case for this function is that the client is going to destroy the 3D context |
207 | * but can't guarantee that context will be destroyed first (perhaps because it may be ref'ed |
208 | * elsewhere by either the client or Skia objects). |
209 | * |
210 | * For Vulkan, even if the device becomes lost, the VkQueue, VkDevice, or VkInstance used to |
211 | * create the context must be alive before calling releaseResourcesAndAbandonContext. |
212 | */ |
213 | void releaseResourcesAndAbandonContext(); |
214 | |
215 | /////////////////////////////////////////////////////////////////////////// |
216 | // Resource Cache |
217 | |
218 | /** DEPRECATED |
219 | * Return the current GPU resource cache limits. |
220 | * |
221 | * @param maxResources If non-null, will be set to -1. |
222 | * @param maxResourceBytes If non-null, returns maximum number of bytes of |
223 | * video memory that can be held in the cache. |
224 | */ |
225 | void getResourceCacheLimits(int* maxResources, size_t* maxResourceBytes) const; |
226 | |
227 | /** |
228 | * Return the current GPU resource cache limit in bytes. |
229 | */ |
230 | size_t getResourceCacheLimit() const; |
231 | |
232 | /** |
233 | * Gets the current GPU resource cache usage. |
234 | * |
235 | * @param resourceCount If non-null, returns the number of resources that are held in the |
236 | * cache. |
     * @param resourceBytes     If non-null, returns the total number of bytes of video memory held
238 | * in the cache. |
239 | */ |
240 | void getResourceCacheUsage(int* resourceCount, size_t* resourceBytes) const; |
241 | |
242 | /** |
243 | * Gets the number of bytes in the cache consumed by purgeable (e.g. unlocked) resources. |
244 | */ |
245 | size_t getResourceCachePurgeableBytes() const; |
246 | |
247 | /** DEPRECATED |
248 | * Specify the GPU resource cache limits. If the current cache exceeds the maxResourceBytes |
249 | * limit, it will be purged (LRU) to keep the cache within the limit. |
250 | * |
251 | * @param maxResources Unused. |
252 | * @param maxResourceBytes The maximum number of bytes of video memory |
253 | * that can be held in the cache. |
254 | */ |
255 | void setResourceCacheLimits(int maxResources, size_t maxResourceBytes); |
256 | |
257 | /** |
258 | * Specify the GPU resource cache limit. If the cache currently exceeds this limit, |
259 | * it will be purged (LRU) to keep the cache within the limit. |
260 | * |
261 | * @param maxResourceBytes The maximum number of bytes of video memory |
262 | * that can be held in the cache. |
263 | */ |
264 | void setResourceCacheLimit(size_t maxResourceBytes); |
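
    // Illustrative sketch: cap the GPU resource cache at 64 MiB and inspect current usage.
    //
    //   ctx->setResourceCacheLimit(64 * 1024 * 1024);
    //   int resourceCount;
    //   size_t resourceBytes;
    //   ctx->getResourceCacheUsage(&resourceCount, &resourceBytes);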
265 | |
266 | /** |
     * Frees GPU resources created by the context. Can be called to reduce GPU memory
268 | * pressure. |
269 | */ |
270 | void freeGpuResources(); |
271 | |
272 | /** |
273 | * Purge GPU resources that haven't been used in the past 'msNotUsed' milliseconds or are |
274 | * otherwise marked for deletion, regardless of whether the context is under budget. |
275 | * |
276 | * If 'scratchResourcesOnly' is true all unlocked scratch resources older than 'msNotUsed' will |
277 | * be purged but the unlocked resources with persistent data will remain. If |
278 | * 'scratchResourcesOnly' is false then all unlocked resources older than 'msNotUsed' will be |
279 | * purged. |
280 | * |
281 | * @param msNotUsed Only unlocked resources not used in these last milliseconds |
282 | * will be cleaned up. |
283 | * @param scratchResourcesOnly If true only unlocked scratch resources will be purged. |
284 | */ |
285 | void performDeferredCleanup(std::chrono::milliseconds msNotUsed, |
286 | bool scratchResourcesOnly=false); |
287 | |
288 | // Temporary compatibility API for Android. |
289 | void purgeResourcesNotUsedInMs(std::chrono::milliseconds msNotUsed) { |
290 | this->performDeferredCleanup(msNotUsed); |
291 | } |
292 | |
293 | /** |
     * Purge unlocked resources from the cache until the provided byte count has been reached
295 | * or we have purged all unlocked resources. The default policy is to purge in LRU order, but |
296 | * can be overridden to prefer purging scratch resources (in LRU order) prior to purging other |
297 | * resource types. |
298 | * |
     * @param bytesToPurge           the desired number of bytes to be purged.
300 | * @param preferScratchResources If true scratch resources will be purged prior to other |
301 | * resource types. |
302 | */ |
303 | void purgeUnlockedResources(size_t bytesToPurge, bool preferScratchResources); |
304 | |
305 | /** |
306 | * This entry point is intended for instances where an app has been backgrounded or |
307 | * suspended. |
308 | * If 'scratchResourcesOnly' is true all unlocked scratch resources will be purged but the |
309 | * unlocked resources with persistent data will remain. If 'scratchResourcesOnly' is false |
310 | * then all unlocked resources will be purged. |
311 | * In either case, after the unlocked resources are purged a separate pass will be made to |
312 | * ensure that resource usage is under budget (i.e., even if 'scratchResourcesOnly' is true |
313 | * some resources with persistent data may be purged to be under budget). |
314 | * |
     * @param scratchResourcesOnly If true only unlocked scratch resources will be purged prior
     *                             to enforcing the budget requirements.
317 | */ |
318 | void purgeUnlockedResources(bool scratchResourcesOnly); |
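
    // Illustrative sketch of a backgrounding handler: finish outstanding GPU work, then release
    // as much memory as possible while keeping resources that carry persistent data.
    //
    //   ctx->flushAndSubmit(/*syncCpu=*/true);
    //   ctx->purgeUnlockedResources(/*scratchResourcesOnly=*/true);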
319 | |
320 | /** |
321 | * Gets the maximum supported texture size. |
322 | */ |
323 | using GrRecordingContext::maxTextureSize; |
324 | |
325 | /** |
326 | * Gets the maximum supported render target size. |
327 | */ |
328 | using GrRecordingContext::maxRenderTargetSize; |
329 | |
330 | /** |
331 | * Can a SkImage be created with the given color type. |
332 | */ |
333 | using GrRecordingContext::colorTypeSupportedAsImage; |
334 | |
335 | /** |
336 | * Can a SkSurface be created with the given color type. To check whether MSAA is supported |
337 | * use maxSurfaceSampleCountForColorType(). |
338 | */ |
339 | using GrRecordingContext::colorTypeSupportedAsSurface; |
340 | |
341 | /** |
342 | * Gets the maximum supported sample count for a color type. 1 is returned if only non-MSAA |
343 | * rendering is supported for the color type. 0 is returned if rendering to this color type |
344 | * is not supported at all. |
345 | */ |
346 | using GrRecordingContext::maxSurfaceSampleCountForColorType; |
347 | |
348 | /////////////////////////////////////////////////////////////////////////// |
349 | // Misc. |
350 | |
351 | /** |
352 | * Inserts a list of GPU semaphores that the current GPU-backed API must wait on before |
353 | * executing any more commands on the GPU. If this call returns false, then the GPU back-end |
354 | * will not wait on any passed in semaphores, and the client will still own the semaphores, |
355 | * regardless of the value of deleteSemaphoresAfterWait. |
356 | * |
357 | * If deleteSemaphoresAfterWait is false then Skia will not delete the semaphores. In this case |
358 | * it is the client's responsibility to not destroy or attempt to reuse the semaphores until it |
359 | * knows that Skia has finished waiting on them. This can be done by using finishedProcs on |
360 | * flush calls. |
361 | */ |
362 | bool wait(int numSemaphores, const GrBackendSemaphore* waitSemaphores, |
363 | bool deleteSemaphoresAfterWait = true); |
364 | |
365 | /** |
366 | * Call to ensure all drawing to the context has been flushed and submitted to the underlying 3D |
     * API. This is equivalent to calling GrDirectContext::flush with a default GrFlushInfo
     * followed by GrDirectContext::submit(syncCpu).
369 | */ |
370 | void flushAndSubmit(bool syncCpu = false) { |
        this->flush(GrFlushInfo());
372 | this->submit(syncCpu); |
373 | } |
374 | |
375 | /** |
376 | * Call to ensure all drawing to the context has been flushed to underlying 3D API specific |
377 | * objects. A call to `submit` is always required to ensure work is actually sent to |
378 | * the gpu. Some specific API details: |
379 | * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some |
380 | * sync objects from the flush will not be valid until a submission occurs. |
381 | * |
382 | * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command |
383 | * buffer or encoder objects. However, these objects are not sent to the gpu until a |
384 | * submission occurs. |
385 | * |
386 | * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be |
387 | * submitted to the gpu during the next submit call (it is possible Skia failed to create a |
388 | * subset of the semaphores). The client should not wait on these semaphores until after submit |
389 | * has been called, and must keep them alive until then. If this call returns |
390 | * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on |
391 | * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in with |
392 | * the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the |
393 | * client is still responsible for deleting any initialized semaphores. |
     * Regardless of semaphore submission, the context will still be flushed. It should be
395 | * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not |
396 | * happen. It simply means there were no semaphores submitted to the GPU. A caller should only |
397 | * take this as a failure if they passed in semaphores to be submitted. |
398 | */ |
399 | GrSemaphoresSubmitted flush(const GrFlushInfo& info); |
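
    // Illustrative sketch: request a callback when the flushed work finishes on the GPU. The
    // GrFlushInfo fields shown are real; MyFrameData, frameData, and onGpuFinished are
    // hypothetical client code.
    //
    //   static void onGpuFinished(GrGpuFinishedContext context) {
    //       static_cast<MyFrameData*>(context)->markGpuDone();
    //   }
    //   GrFlushInfo info;
    //   info.fFinishedProc    = onGpuFinished;
    //   info.fFinishedContext = frameData;
    //   ctx->flush(info);
    //   ctx->submit();   // the finishedProc only fires once the work has been submitted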
400 | |
    void flush() { this->flush(GrFlushInfo()); }
402 | |
403 | /** Flushes any pending uses of texture-backed images in the GPU backend. If the image is not |
404 | * texture-backed (including promise texture images) or if the GrDirectContext does not |
405 | * have the same context ID as the context backing the image then this is a no-op. |
406 | * If the image was not used in any non-culled draws in the current queue of work for the |
407 | * passed GrDirectContext then this is a no-op unless the GrFlushInfo contains semaphores or |
408 | * a finish proc. Those are respected even when the image has not been used. |
409 | * @param image the non-null image to flush. |
410 | * @param info flush options |
411 | */ |
412 | GrSemaphoresSubmitted flush(sk_sp<const SkImage> image, const GrFlushInfo& info); |
413 | void flush(sk_sp<const SkImage> image); |
414 | |
415 | /** Version of flush() that uses a default GrFlushInfo. Also submits the flushed work to the |
416 | * GPU. |
417 | */ |
418 | void flushAndSubmit(sk_sp<const SkImage> image); |
419 | |
420 | /** Issues pending SkSurface commands to the GPU-backed API objects and resolves any SkSurface |
421 | * MSAA. A call to GrDirectContext::submit is always required to ensure work is actually sent |
422 | * to the gpu. Some specific API details: |
423 | * GL: Commands are actually sent to the driver, but glFlush is never called. Thus some |
424 | * sync objects from the flush will not be valid until a submission occurs. |
425 | * |
426 | * Vulkan/Metal/D3D/Dawn: Commands are recorded to the backend APIs corresponding command |
427 | * buffer or encoder objects. However, these objects are not sent to the gpu until a |
428 | * submission occurs. |
429 | * |
430 | * The work that is submitted to the GPU will be dependent on the BackendSurfaceAccess that is |
431 | * passed in. |
432 | * |
433 | * If BackendSurfaceAccess::kNoAccess is passed in all commands will be issued to the GPU. |
434 | * |
435 | * If BackendSurfaceAccess::kPresent is passed in and the backend API is not Vulkan, it is |
436 | * treated the same as kNoAccess. If the backend API is Vulkan, the VkImage that backs the |
437 | * SkSurface will be transferred back to its original queue. If the SkSurface was created by |
438 | * wrapping a VkImage, the queue will be set to the queue which was originally passed in on |
439 | * the GrVkImageInfo. Additionally, if the original queue was not external or foreign the |
440 | * layout of the VkImage will be set to VK_IMAGE_LAYOUT_PRESENT_SRC_KHR. |
441 | * |
442 | * The GrFlushInfo describes additional options to flush. Please see documentation at |
443 | * GrFlushInfo for more info. |
444 | * |
445 | * If the return is GrSemaphoresSubmitted::kYes, only initialized GrBackendSemaphores will be |
446 | * submitted to the gpu during the next submit call (it is possible Skia failed to create a |
447 | * subset of the semaphores). The client should not wait on these semaphores until after submit |
448 | * has been called, but must keep them alive until then. If a submit flag was passed in with |
     * the flush these valid semaphores can be waited on immediately. If this call returns
450 | * GrSemaphoresSubmitted::kNo, the GPU backend will not submit any semaphores to be signaled on |
451 | * the GPU. Thus the client should not have the GPU wait on any of the semaphores passed in |
452 | * with the GrFlushInfo. Regardless of whether semaphores were submitted to the GPU or not, the |
453 | * client is still responsible for deleting any initialized semaphores. |
454 | * Regardless of semaphore submission the context will still be flushed. It should be |
455 | * emphasized that a return value of GrSemaphoresSubmitted::kNo does not mean the flush did not |
456 | * happen. It simply means there were no semaphores submitted to the GPU. A caller should only |
457 | * take this as a failure if they passed in semaphores to be submitted. |
458 | * |
459 | * Pending surface commands are flushed regardless of the return result. |
460 | * |
461 | * @param surface The GPU backed surface to be flushed. Has no effect on a CPU-backed surface. |
462 | * @param access type of access the call will do on the backend object after flush |
463 | * @param info flush options |
464 | */ |
465 | GrSemaphoresSubmitted flush(sk_sp<SkSurface> surface, |
466 | SkSurfaces::BackendSurfaceAccess access, |
467 | const GrFlushInfo& info); |
468 | GrSemaphoresSubmitted flush(SkSurface* surface, |
469 | SkSurfaces::BackendSurfaceAccess access, |
470 | const GrFlushInfo& info); |
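
    // Illustrative sketch of preparing a surface for presentation; kPresent matters most on
    // Vulkan, where it transitions the backing VkImage back to its original queue/layout.
    // presentSwapchainImage() is a hypothetical platform call, and `surface` an sk_sp<SkSurface>.
    //
    //   ctx->flush(surface.get(), SkSurfaces::BackendSurfaceAccess::kPresent, GrFlushInfo());
    //   ctx->submit();
    //   presentSwapchainImage();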
471 | |
472 | /** |
473 | * Same as above except: |
474 | * |
475 | * If a skgpu::MutableTextureState is passed in, at the end of the flush we will transition |
476 | * the surface to be in the state requested by the skgpu::MutableTextureState. If the surface |
477 | * (or SkImage or GrBackendSurface wrapping the same backend object) is used again after this |
478 | * flush the state may be changed and no longer match what is requested here. This is often |
     * used if the surface will be used for presenting or other external use and the client wants
     * the backend object to be prepped for that use. A finishedProc or semaphore on the
     * GrFlushInfo will also include the work for any requested state change.
482 | * |
483 | * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's |
484 | * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to |
485 | * tell Skia to not change those respective states. |
486 | * |
487 | * @param surface The GPU backed surface to be flushed. Has no effect on a CPU-backed surface. |
488 | * @param info flush options |
489 | * @param newState optional state change request after flush |
490 | */ |
491 | GrSemaphoresSubmitted flush(sk_sp<SkSurface> surface, |
492 | const GrFlushInfo& info, |
493 | const skgpu::MutableTextureState* newState = nullptr); |
494 | GrSemaphoresSubmitted flush(SkSurface* surface, |
495 | const GrFlushInfo& info, |
496 | const skgpu::MutableTextureState* newState = nullptr); |
497 | |
498 | /** Call to ensure all reads/writes of the surface have been issued to the underlying 3D API. |
     * Skia will correctly order its own draws and pixel operations. This must be used to ensure
500 | * correct ordering when the surface backing store is accessed outside Skia (e.g. direct use of |
501 | * the 3D API or a windowing system). This is equivalent to |
502 | * calling ::flush with a default GrFlushInfo followed by ::submit(syncCpu). |
503 | * |
504 | * Has no effect on a CPU-backed surface. |
505 | */ |
506 | void flushAndSubmit(sk_sp<SkSurface> surface, bool syncCpu = false); |
507 | |
508 | /** |
509 | * Flushes the given surface with the default GrFlushInfo. |
510 | * |
511 | * Has no effect on a CPU-backed surface. |
512 | */ |
513 | void flush(sk_sp<SkSurface> surface); |
514 | |
515 | /** |
516 | * Submit outstanding work to the gpu from all previously un-submitted flushes. The return |
517 | * value of the submit will indicate whether or not the submission to the GPU was successful. |
518 | * |
519 | * If the call returns true, all previously passed in semaphores in flush calls will have been |
520 | * submitted to the GPU and they can safely be waited on. The caller should wait on those |
521 | * semaphores or perform some other global synchronization before deleting the semaphores. |
522 | * |
523 | * If it returns false, then those same semaphores will not have been submitted and we will not |
524 | * try to submit them again. The caller is free to delete the semaphores at any time. |
525 | * |
526 | * If the syncCpu flag is true this function will return once the gpu has finished with all |
527 | * submitted work. |
528 | */ |
529 | bool submit(bool syncCpu = false); |
530 | |
531 | /** |
532 | * Checks whether any asynchronous work is complete and if so calls related callbacks. |
533 | */ |
534 | void checkAsyncWorkCompletion(); |
535 | |
536 | /** Enumerates all cached GPU resources and dumps their memory to traceMemoryDump. */ |
537 | // Chrome is using this! |
538 | void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const; |
539 | |
540 | bool supportsDistanceFieldText() const; |
541 | |
542 | void storeVkPipelineCacheData(); |
543 | |
544 | /** |
545 | * Retrieve the default GrBackendFormat for a given SkColorType and renderability. |
546 | * It is guaranteed that this backend format will be the one used by the following |
547 | * SkColorType and GrSurfaceCharacterization-based createBackendTexture methods. |
548 | * |
549 | * The caller should check that the returned format is valid. |
550 | */ |
551 | using GrRecordingContext::defaultBackendFormat; |
552 | |
553 | /** |
554 | * The explicitly allocated backend texture API allows clients to use Skia to create backend |
555 | * objects outside of Skia proper (i.e., Skia's caching system will not know about them.) |
556 | * |
557 | * It is the client's responsibility to delete all these objects (using deleteBackendTexture) |
558 | * before deleting the context used to create them. If the backend is Vulkan, the textures must |
559 | * be deleted before abandoning the context as well. Additionally, clients should only delete |
560 | * these objects on the thread for which that context is active. |
561 | * |
562 | * The client is responsible for ensuring synchronization between different uses |
563 | * of the backend object (i.e., wrapping it in a surface, rendering to it, deleting the |
     * surface, rewrapping it in an image and drawing the image will require explicit
565 | * synchronization on the client's part). |
566 | */ |
567 | |
568 | /** |
569 | * If possible, create an uninitialized backend texture. The client should ensure that the |
570 | * returned backend texture is valid. |
571 | * For the Vulkan backend the layout of the created VkImage will be: |
572 | * VK_IMAGE_LAYOUT_UNDEFINED. |
573 | */ |
574 | GrBackendTexture createBackendTexture(int width, |
575 | int height, |
576 | const GrBackendFormat&, |
577 | GrMipmapped, |
578 | GrRenderable, |
579 | GrProtected = GrProtected::kNo, |
580 | std::string_view label = {}); |
581 | |
582 | /** |
583 | * If possible, create an uninitialized backend texture. The client should ensure that the |
584 | * returned backend texture is valid. |
585 | * If successful, the created backend texture will be compatible with the provided |
586 | * SkColorType. |
587 | * For the Vulkan backend the layout of the created VkImage will be: |
588 | * VK_IMAGE_LAYOUT_UNDEFINED. |
589 | */ |
590 | GrBackendTexture createBackendTexture(int width, int height, |
591 | SkColorType, |
592 | GrMipmapped, |
593 | GrRenderable, |
594 | GrProtected = GrProtected::kNo, |
595 | std::string_view label = {}); |
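
    // Illustrative sketch: create an uninitialized, renderable backend texture and release it
    // again; wrapping it in an SkSurface or SkImage is a separate step that is omitted here.
    //
    //   GrBackendTexture tex = ctx->createBackendTexture(256, 256,
    //                                                    kRGBA_8888_SkColorType,
    //                                                    GrMipmapped::kNo,
    //                                                    GrRenderable::kYes);
    //   if (tex.isValid()) {
    //       // ... hand the texture to a wrapping API or an external consumer ...
    //       ctx->deleteBackendTexture(tex);
    //   }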
596 | |
597 | /** |
598 | * If possible, create a backend texture initialized to a particular color. The client should |
599 | * ensure that the returned backend texture is valid. The client can pass in a finishedProc |
600 | * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The |
601 | * client is required to call `submit` to send the upload work to the gpu. The |
602 | * finishedProc will always get called even if we failed to create the GrBackendTexture. |
603 | * For the Vulkan backend the layout of the created VkImage will be: |
604 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
605 | */ |
606 | GrBackendTexture createBackendTexture(int width, int height, |
607 | const GrBackendFormat&, |
608 | const SkColor4f& color, |
609 | GrMipmapped, |
610 | GrRenderable, |
611 | GrProtected = GrProtected::kNo, |
612 | GrGpuFinishedProc finishedProc = nullptr, |
613 | GrGpuFinishedContext finishedContext = nullptr, |
614 | std::string_view label = {}); |
615 | |
616 | /** |
617 | * If possible, create a backend texture initialized to a particular color. The client should |
618 | * ensure that the returned backend texture is valid. The client can pass in a finishedProc |
619 | * to be notified when the data has been uploaded by the gpu and the texture can be deleted. The |
620 | * client is required to call `submit` to send the upload work to the gpu. The |
621 | * finishedProc will always get called even if we failed to create the GrBackendTexture. |
622 | * If successful, the created backend texture will be compatible with the provided |
623 | * SkColorType. |
624 | * For the Vulkan backend the layout of the created VkImage will be: |
625 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
626 | */ |
627 | GrBackendTexture createBackendTexture(int width, int height, |
628 | SkColorType, |
629 | const SkColor4f& color, |
630 | GrMipmapped, |
631 | GrRenderable, |
632 | GrProtected = GrProtected::kNo, |
633 | GrGpuFinishedProc finishedProc = nullptr, |
634 | GrGpuFinishedContext finishedContext = nullptr, |
635 | std::string_view label = {}); |
636 | |
637 | /** |
638 | * If possible, create a backend texture initialized with the provided pixmap data. The client |
639 | * should ensure that the returned backend texture is valid. The client can pass in a |
640 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
641 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
642 | * The finishedProc will always get called even if we failed to create the GrBackendTexture. |
643 | * If successful, the created backend texture will be compatible with the provided |
644 | * pixmap(s). Compatible, in this case, means that the backend format will be the result |
645 | * of calling defaultBackendFormat on the base pixmap's colortype. The src data can be deleted |
646 | * when this call returns. |
647 | * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired |
648 | * the data for all the mipmap levels must be provided. In the mipmapped case all the |
649 | * colortypes of the provided pixmaps must be the same. Additionally, all the miplevels |
650 | * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). The |
651 | * GrSurfaceOrigin controls whether the pixmap data is vertically flipped in the texture. |
652 | * Note: the pixmap's alphatypes and colorspaces are ignored. |
653 | * For the Vulkan backend the layout of the created VkImage will be: |
654 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
655 | */ |
656 | GrBackendTexture createBackendTexture(const SkPixmap srcData[], |
657 | int numLevels, |
658 | GrSurfaceOrigin, |
659 | GrRenderable, |
660 | GrProtected, |
661 | GrGpuFinishedProc finishedProc = nullptr, |
662 | GrGpuFinishedContext finishedContext = nullptr, |
663 | std::string_view label = {}); |
664 | |
665 | /** |
666 | * Convenience version createBackendTexture() that takes just a base level pixmap. |
667 | */ |
668 | GrBackendTexture createBackendTexture(const SkPixmap& srcData, |
669 | GrSurfaceOrigin textureOrigin, |
670 | GrRenderable renderable, |
671 | GrProtected isProtected, |
672 | GrGpuFinishedProc finishedProc = nullptr, |
673 | GrGpuFinishedContext finishedContext = nullptr, |
674 | std::string_view label = {}); |
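
    // Illustrative sketch: upload CPU pixels into a new backend texture. The pixel buffer may be
    // freed as soon as the call returns; an optional finishedProc reports when the GPU upload
    // itself has completed.
    //
    //   SkImageInfo ii = SkImageInfo::Make(64, 64, kRGBA_8888_SkColorType, kPremul_SkAlphaType);
    //   std::vector<uint32_t> pixels(64 * 64, 0xFF00FF00);   // opaque green, assuming RGBA byte order
    //   SkPixmap pm(ii, pixels.data(), ii.minRowBytes());
    //   GrBackendTexture tex = ctx->createBackendTexture(pm,
    //                                                    kTopLeft_GrSurfaceOrigin,
    //                                                    GrRenderable::kNo,
    //                                                    GrProtected::kNo);
    //   ctx->submit();   // required to actually send the upload to the GPU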
675 | |
676 | // Deprecated versions that do not take origin and assume top-left. |
677 | GrBackendTexture createBackendTexture(const SkPixmap srcData[], |
678 | int numLevels, |
679 | GrRenderable renderable, |
680 | GrProtected isProtected, |
681 | GrGpuFinishedProc finishedProc = nullptr, |
682 | GrGpuFinishedContext finishedContext = nullptr, |
683 | std::string_view label = {}); |
684 | |
685 | GrBackendTexture createBackendTexture(const SkPixmap& srcData, |
686 | GrRenderable renderable, |
687 | GrProtected isProtected, |
688 | GrGpuFinishedProc finishedProc = nullptr, |
689 | GrGpuFinishedContext finishedContext = nullptr, |
690 | std::string_view label = {}); |
691 | |
692 | /** |
693 | * If possible, updates a backend texture to be filled to a particular color. The client should |
694 | * check the return value to see if the update was successful. The client can pass in a |
695 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
696 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
697 | * The finishedProc will always get called even if we failed to update the GrBackendTexture. |
698 | * For the Vulkan backend after a successful update the layout of the created VkImage will be: |
699 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
700 | */ |
701 | bool updateBackendTexture(const GrBackendTexture&, |
702 | const SkColor4f& color, |
703 | GrGpuFinishedProc finishedProc, |
704 | GrGpuFinishedContext finishedContext); |
705 | |
706 | /** |
707 | * If possible, updates a backend texture to be filled to a particular color. The data in |
708 | * GrBackendTexture and passed in color is interpreted with respect to the passed in |
709 | * SkColorType. The client should check the return value to see if the update was successful. |
710 | * The client can pass in a finishedProc to be notified when the data has been uploaded by the |
711 | * gpu and the texture can be deleted. The client is required to call `submit` to send |
712 | * the upload work to the gpu. The finishedProc will always get called even if we failed to |
713 | * update the GrBackendTexture. |
714 | * For the Vulkan backend after a successful update the layout of the created VkImage will be: |
715 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
716 | */ |
717 | bool updateBackendTexture(const GrBackendTexture&, |
718 | SkColorType skColorType, |
719 | const SkColor4f& color, |
720 | GrGpuFinishedProc finishedProc, |
721 | GrGpuFinishedContext finishedContext); |
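
    // Illustrative sketch: clear an existing backend texture to opaque red.
    //
    //   bool ok = ctx->updateBackendTexture(tex, kRGBA_8888_SkColorType, SkColors::kRed,
    //                                       /*finishedProc=*/nullptr,
    //                                       /*finishedContext=*/nullptr);
    //   ctx->submit();   // sends the upload to the GPU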
722 | |
723 | /** |
724 | * If possible, updates a backend texture filled with the provided pixmap data. The client |
725 | * should check the return value to see if the update was successful. The client can pass in a |
726 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
727 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
728 | * The finishedProc will always get called even if we failed to create the GrBackendTexture. |
729 | * The backend texture must be compatible with the provided pixmap(s). Compatible, in this case, |
730 | * means that the backend format is compatible with the base pixmap's colortype. The src data |
731 | * can be deleted when this call returns. |
732 | * If the backend texture is mip mapped, the data for all the mipmap levels must be provided. |
733 | * In the mipmapped case all the colortypes of the provided pixmaps must be the same. |
734 | * Additionally, all the miplevels must be sized correctly (please see |
735 | * SkMipmap::ComputeLevelSize and ComputeLevelCount). The GrSurfaceOrigin controls whether the |
736 | * pixmap data is vertically flipped in the texture. |
737 | * Note: the pixmap's alphatypes and colorspaces are ignored. |
738 | * For the Vulkan backend after a successful update the layout of the created VkImage will be: |
739 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
740 | */ |
741 | bool updateBackendTexture(const GrBackendTexture&, |
742 | const SkPixmap srcData[], |
743 | int numLevels, |
744 | GrSurfaceOrigin = kTopLeft_GrSurfaceOrigin, |
745 | GrGpuFinishedProc finishedProc = nullptr, |
746 | GrGpuFinishedContext finishedContext = nullptr); |
747 | |
748 | /** |
749 | * Convenience version of updateBackendTexture that takes just a base level pixmap. |
750 | */ |
751 | bool updateBackendTexture(const GrBackendTexture& texture, |
752 | const SkPixmap& srcData, |
753 | GrSurfaceOrigin textureOrigin = kTopLeft_GrSurfaceOrigin, |
754 | GrGpuFinishedProc finishedProc = nullptr, |
755 | GrGpuFinishedContext finishedContext = nullptr) { |
756 | return this->updateBackendTexture(texture, |
                                          &srcData,
                                          1,
759 | textureOrigin, |
760 | finishedProc, |
761 | finishedContext); |
762 | } |
763 | |
764 | // Deprecated version that does not take origin and assumes top-left. |
765 | bool updateBackendTexture(const GrBackendTexture& texture, |
766 | const SkPixmap srcData[], |
767 | int numLevels, |
768 | GrGpuFinishedProc finishedProc, |
769 | GrGpuFinishedContext finishedContext); |
770 | |
771 | /** |
772 | * Retrieve the GrBackendFormat for a given SkTextureCompressionType. This is |
773 | * guaranteed to match the backend format used by the following |
774 | * createCompressedBackendTexture methods that take a CompressionType. |
775 | * |
776 | * The caller should check that the returned format is valid. |
777 | */ |
778 | using GrRecordingContext::compressedBackendFormat; |
779 | |
780 | /** |
     * If possible, create a compressed backend texture initialized to a particular color. The
782 | * client should ensure that the returned backend texture is valid. The client can pass in a |
783 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
784 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
785 | * The finishedProc will always get called even if we failed to create the GrBackendTexture. |
786 | * For the Vulkan backend the layout of the created VkImage will be: |
787 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
788 | */ |
789 | GrBackendTexture createCompressedBackendTexture(int width, int height, |
790 | const GrBackendFormat&, |
791 | const SkColor4f& color, |
792 | GrMipmapped, |
793 | GrProtected = GrProtected::kNo, |
794 | GrGpuFinishedProc finishedProc = nullptr, |
795 | GrGpuFinishedContext finishedContext = nullptr); |
796 | |
797 | GrBackendTexture createCompressedBackendTexture(int width, int height, |
798 | SkTextureCompressionType, |
799 | const SkColor4f& color, |
800 | GrMipmapped, |
801 | GrProtected = GrProtected::kNo, |
802 | GrGpuFinishedProc finishedProc = nullptr, |
803 | GrGpuFinishedContext finishedContext = nullptr); |
804 | |
805 | /** |
806 | * If possible, create a backend texture initialized with the provided raw data. The client |
807 | * should ensure that the returned backend texture is valid. The client can pass in a |
808 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
809 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
     * The finishedProc will always get called even if we failed to create the GrBackendTexture.
811 | * If numLevels is 1 a non-mipmapped texture will result. If a mipmapped texture is desired |
812 | * the data for all the mipmap levels must be provided. Additionally, all the miplevels |
813 | * must be sized correctly (please see SkMipmap::ComputeLevelSize and ComputeLevelCount). |
814 | * For the Vulkan backend the layout of the created VkImage will be: |
815 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
816 | */ |
817 | GrBackendTexture createCompressedBackendTexture(int width, int height, |
818 | const GrBackendFormat&, |
819 | const void* data, size_t dataSize, |
820 | GrMipmapped, |
821 | GrProtected = GrProtected::kNo, |
822 | GrGpuFinishedProc finishedProc = nullptr, |
823 | GrGpuFinishedContext finishedContext = nullptr); |
824 | |
825 | GrBackendTexture createCompressedBackendTexture(int width, int height, |
826 | SkTextureCompressionType, |
827 | const void* data, size_t dataSize, |
828 | GrMipmapped, |
829 | GrProtected = GrProtected::kNo, |
830 | GrGpuFinishedProc finishedProc = nullptr, |
831 | GrGpuFinishedContext finishedContext = nullptr); |
832 | |
833 | /** |
834 | * If possible, updates a backend texture filled with the provided color. If the texture is |
835 | * mipmapped, all levels of the mip chain will be updated to have the supplied color. The client |
836 | * should check the return value to see if the update was successful. The client can pass in a |
837 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
838 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
839 | * The finishedProc will always get called even if we failed to create the GrBackendTexture. |
840 | * For the Vulkan backend after a successful update the layout of the created VkImage will be: |
841 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
842 | */ |
843 | bool updateCompressedBackendTexture(const GrBackendTexture&, |
844 | const SkColor4f& color, |
845 | GrGpuFinishedProc finishedProc, |
846 | GrGpuFinishedContext finishedContext); |
847 | |
848 | /** |
849 | * If possible, updates a backend texture filled with the provided raw data. The client |
850 | * should check the return value to see if the update was successful. The client can pass in a |
851 | * finishedProc to be notified when the data has been uploaded by the gpu and the texture can be |
852 | * deleted. The client is required to call `submit` to send the upload work to the gpu. |
853 | * The finishedProc will always get called even if we failed to create the GrBackendTexture. |
854 | * If a mipmapped texture is passed in, the data for all the mipmap levels must be provided. |
855 | * Additionally, all the miplevels must be sized correctly (please see |
     * SkMipmap::ComputeLevelSize and ComputeLevelCount).
857 | * For the Vulkan backend after a successful update the layout of the created VkImage will be: |
858 | * VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL |
859 | */ |
860 | bool updateCompressedBackendTexture(const GrBackendTexture&, |
861 | const void* data, |
862 | size_t dataSize, |
863 | GrGpuFinishedProc finishedProc, |
864 | GrGpuFinishedContext finishedContext); |
865 | |
866 | /** |
867 | * Updates the state of the GrBackendTexture/RenderTarget to have the passed in |
868 | * skgpu::MutableTextureState. All objects that wrap the backend surface (i.e. SkSurfaces and |
869 | * SkImages) will also be aware of this state change. This call does not submit the state change |
870 | * to the gpu, but requires the client to call `submit` to send it to the GPU. The work |
     * for this call is ordered linearly with all other calls that require GrDirectContext::submit to
     * be called (e.g. updateBackendTexture and flush). If finishedProc is not null then it will be
873 | * called with finishedContext after the state transition is known to have occurred on the GPU. |
874 | * |
875 | * See skgpu::MutableTextureState to see what state can be set via this call. |
876 | * |
877 | * If the backend API is Vulkan, the caller can set the skgpu::MutableTextureState's |
878 | * VkImageLayout to VK_IMAGE_LAYOUT_UNDEFINED or queueFamilyIndex to VK_QUEUE_FAMILY_IGNORED to |
879 | * tell Skia to not change those respective states. |
880 | * |
881 | * If previousState is not null and this returns true, then Skia will have filled in |
882 | * previousState to have the values of the state before this call. |
883 | */ |
884 | bool setBackendTextureState(const GrBackendTexture&, |
885 | const skgpu::MutableTextureState&, |
886 | skgpu::MutableTextureState* previousState = nullptr, |
887 | GrGpuFinishedProc finishedProc = nullptr, |
888 | GrGpuFinishedContext finishedContext = nullptr); |
889 | bool setBackendRenderTargetState(const GrBackendRenderTarget&, |
890 | const skgpu::MutableTextureState&, |
891 | skgpu::MutableTextureState* previousState = nullptr, |
892 | GrGpuFinishedProc finishedProc = nullptr, |
893 | GrGpuFinishedContext finishedContext = nullptr); |
894 | |
895 | void deleteBackendTexture(const GrBackendTexture&); |
896 | |
897 | // This interface allows clients to pre-compile shaders and populate the runtime program cache. |
898 | // The key and data blobs should be the ones passed to the PersistentCache, in SkSL format. |
899 | // |
900 | // Steps to use this API: |
901 | // |
902 | // 1) Create a GrDirectContext as normal, but set fPersistentCache on GrContextOptions to |
903 | // something that will save the cached shader blobs. Set fShaderCacheStrategy to kSkSL. This |
904 | // will ensure that the blobs are SkSL, and are suitable for pre-compilation. |
905 | // 2) Run your application, and save all of the key/data pairs that are fed to the cache. |
906 | // |
907 | // 3) Switch over to shipping your application. Include the key/data pairs from above. |
908 | // 4) At startup (or any convenient time), call precompileShader for each key/data pair. |
909 | // This will compile the SkSL to create a GL program, and populate the runtime cache. |
910 | // |
911 | // This is only guaranteed to work if the context/device used in step #2 are created in the |
912 | // same way as the one used in step #4, and the same GrContextOptions are specified. |
    // Using cached shader blobs on a different device or driver is undefined.
914 | bool precompileShader(const SkData& key, const SkData& data); |
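
    // Illustrative sketch of step 4 above, assuming the key/data pairs captured in step 2 were
    // saved into some application-defined container of SkData pairs (shown here as a
    // hypothetical `shaderStore`):
    //
    //   for (const auto& [key, data] : shaderStore) {
    //       ctx->precompileShader(*key, *data);
    //   }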
915 | |
916 | #ifdef SK_ENABLE_DUMP_GPU |
917 | /** Returns a string with detailed information about the context & GPU, in JSON format. */ |
918 | SkString dump() const; |
919 | #endif |
920 | |
921 | class DirectContextID { |
922 | public: |
923 | static GrDirectContext::DirectContextID Next(); |
924 | |
925 | DirectContextID() : fID(SK_InvalidUniqueID) {} |
926 | |
927 | bool operator==(const DirectContextID& that) const { return fID == that.fID; } |
928 | bool operator!=(const DirectContextID& that) const { return !(*this == that); } |
929 | |
930 | void makeInvalid() { fID = SK_InvalidUniqueID; } |
931 | bool isValid() const { return fID != SK_InvalidUniqueID; } |
932 | |
933 | private: |
934 | constexpr DirectContextID(uint32_t id) : fID(id) {} |
935 | uint32_t fID; |
936 | }; |
937 | |
938 | DirectContextID directContextID() const { return fDirectContextID; } |
939 | |
940 | // Provides access to functions that aren't part of the public API. |
941 | GrDirectContextPriv priv(); |
942 | const GrDirectContextPriv priv() const; // NOLINT(readability-const-return-type) |
943 | |
944 | protected: |
945 | GrDirectContext(GrBackendApi backend, const GrContextOptions& options); |
946 | |
947 | bool init() override; |
948 | |
949 | GrAtlasManager* onGetAtlasManager() { return fAtlasManager.get(); } |
950 | #if !defined(SK_ENABLE_OPTIMIZE_SIZE) |
951 | skgpu::ganesh::SmallPathAtlasMgr* onGetSmallPathAtlasMgr(); |
952 | #endif |
953 | |
954 | GrDirectContext* asDirectContext() override { return this; } |
955 | |
956 | private: |
    // This call will make sure our work on the GPU is finished and will execute any outstanding
958 | // asynchronous work (e.g. calling finished procs, freeing resources, etc.) related to the |
959 | // outstanding work on the gpu. The main use currently for this function is when tearing down or |
960 | // abandoning the context. |
961 | // |
962 | // When we finish up work on the GPU it could trigger callbacks to the client. In the case we |
963 | // are abandoning the context we don't want the client to be able to use the GrDirectContext to |
964 | // issue more commands during the callback. Thus before calling this function we set the |
    // GrDirectContext's state to be abandoned. However, we need to be able to get by the abandoned
966 | // check in the call to know that it is safe to execute this. The shouldExecuteWhileAbandoned |
967 | // bool is used for this signal. |
968 | void syncAllOutstandingGpuWork(bool shouldExecuteWhileAbandoned); |
969 | |
970 | // This delete callback needs to be the first thing on the GrDirectContext so that it is the |
971 | // last thing destroyed. The callback may signal the client to clean up things that may need |
    // to survive the lifetime of some of the other objects on the GrDirectContext. So make sure
973 | // we don't call it until all else has been destroyed. |
974 | class DeleteCallbackHelper { |
975 | public: |
976 | DeleteCallbackHelper(GrDirectContextDestroyedContext context, |
977 | GrDirectContextDestroyedProc proc) |
978 | : fContext(context), fProc(proc) {} |
979 | |
980 | ~DeleteCallbackHelper() { |
981 | if (fProc) { |
982 | fProc(fContext); |
983 | } |
984 | } |
985 | |
986 | private: |
987 | GrDirectContextDestroyedContext fContext; |
988 | GrDirectContextDestroyedProc fProc; |
989 | }; |
990 | std::unique_ptr<DeleteCallbackHelper> fDeleteCallbackHelper; |
991 | |
992 | const DirectContextID fDirectContextID; |
993 | // fTaskGroup must appear before anything that uses it (e.g. fGpu), so that it is destroyed |
994 | // after all of its users. Clients of fTaskGroup will generally want to ensure that they call |
995 | // wait() on it as they are being destroyed, to avoid the possibility of pending tasks being |
996 | // invoked after objects they depend upon have already been destroyed. |
997 | std::unique_ptr<SkTaskGroup> fTaskGroup; |
998 | std::unique_ptr<sktext::gpu::StrikeCache> fStrikeCache; |
999 | sk_sp<GrGpu> fGpu; |
1000 | std::unique_ptr<GrResourceCache> fResourceCache; |
1001 | std::unique_ptr<GrResourceProvider> fResourceProvider; |
1002 | |
1003 | // This is incremented before we start calling ReleaseProcs from GrSurfaces and decremented |
    // after. A ReleaseProc may trigger code causing another resource to get freed, so we track
    // the count to know if we are in a ReleaseProc at any level. When this is set to a value greater
1006 | // than zero we will not allow abandonContext calls to be made on the context. |
1007 | int fInsideReleaseProcCnt = 0; |
1008 | |
1009 | bool fDidTestPMConversions; |
1010 | // true if the PM/UPM conversion succeeded; false otherwise |
1011 | bool fPMUPMConversionsRoundTrip; |
1012 | |
1013 | GrContextOptions::PersistentCache* fPersistentCache; |
1014 | |
1015 | std::unique_ptr<GrClientMappedBufferManager> fMappedBufferManager; |
1016 | std::unique_ptr<GrAtlasManager> fAtlasManager; |
1017 | |
1018 | #if !defined(SK_ENABLE_OPTIMIZE_SIZE) |
1019 | std::unique_ptr<skgpu::ganesh::SmallPathAtlasMgr> fSmallPathAtlasMgr; |
1020 | #endif |
1021 | |
1022 | friend class GrDirectContextPriv; |
1023 | }; |
1024 | |
1025 | |
1026 | #endif |
1027 | |