/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

/* amdgpu_amdkfd.h defines the private interface between amdgpu and amdkfd. */

#ifndef AMDGPU_AMDKFD_H_INCLUDED
#define AMDGPU_AMDKFD_H_INCLUDED

#include <linux/list.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>
#include <linux/mmu_notifier.h>
#include <linux/memremap.h>
#include <kgd_kfd_interface.h>
#include "amdgpu_sync.h"
#include "amdgpu_vm.h"
#include "amdgpu_xcp.h"

extern uint64_t amdgpu_amdkfd_total_mem_size;

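/*
 * Granularity of a GPU TLB invalidation, as consumed by
 * amdgpu_amdkfd_flush_gpu_tlb_pasid() below.
 */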
enum TLB_FLUSH_TYPE {
	TLB_FLUSH_LEGACY = 0,
	TLB_FLUSH_LIGHTWEIGHT,
	TLB_FLUSH_HEAVYWEIGHT
};

struct amdgpu_device;

enum kfd_mem_attachment_type {
	KFD_MEM_ATT_SHARED,	/* Share kgd_mem->bo or another attachment's */
	KFD_MEM_ATT_USERPTR,	/* SG bo to DMA map pages from a userptr bo */
	KFD_MEM_ATT_DMABUF,	/* DMAbuf to DMA map TTM BOs */
	KFD_MEM_ATT_SG		/* Tag to DMA map SG BOs */
};

struct kfd_mem_attachment {
	struct list_head list;
	enum kfd_mem_attachment_type type;
	bool is_mapped;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_device *adev;
	uint64_t va;
	uint64_t pte_flags;
};

struct kgd_mem {
	struct mutex lock;
	struct amdgpu_bo *bo;
	struct dma_buf *dmabuf;
	struct hmm_range *range;
	struct list_head attachments;
	/* protected by amdkfd_process_info.lock */
	struct list_head validate_list;
	uint32_t domain;
	unsigned int mapped_to_gpu_memory;
	uint64_t va;

	uint32_t alloc_flags;

	uint32_t invalid;
	struct amdkfd_process_info *process_info;

	struct amdgpu_sync sync;

	bool aql_queue;
	bool is_imported;
};

/*
 * KFD Memory Eviction: an eviction fence is created per KFD process and
 * attached to all of its BOs; when another client waits on the fence
 * (e.g. to evict one of those BOs), the whole process is evicted and
 * later restored.
 */
struct amdgpu_amdkfd_fence {
	struct dma_fence base;
	struct mm_struct *mm;
	spinlock_t lock;
	char timeline_name[TASK_COMM_LEN];
	struct svm_range_bo *svm_bo;
};

struct amdgpu_kfd_dev {
	struct kfd_dev *dev;
	int64_t vram_used[MAX_XCP];
	uint64_t vram_used_aligned[MAX_XCP];
	bool init_complete;
	struct work_struct reset_work;

	/* HMM page migration MEMORY_DEVICE_PRIVATE mapping */
	struct dev_pagemap pgmap;
};

enum kgd_engine_type {
	KGD_ENGINE_PFP = 1,
	KGD_ENGINE_ME,
	KGD_ENGINE_CE,
	KGD_ENGINE_MEC1,
	KGD_ENGINE_MEC2,
	KGD_ENGINE_RLC,
	KGD_ENGINE_SDMA1,
	KGD_ENGINE_SDMA2,
	KGD_ENGINE_MAX
};

struct amdkfd_process_info {
	/* List head of all VMs that belong to a KFD process */
	struct list_head vm_list_head;
	/* List head for all KFD BOs that belong to a KFD process. */
	struct list_head kfd_bo_list;
	/* List of userptr BOs that are valid or invalid */
	struct list_head userptr_valid_list;
	struct list_head userptr_inval_list;
	/* Lock to protect kfd_bo_list */
	struct mutex lock;

	/* Number of VMs */
	unsigned int n_vms;
	/* Eviction Fence */
	struct amdgpu_amdkfd_fence *eviction_fence;

	/* MMU-notifier related fields */
	struct mutex notifier_lock;
	uint32_t evicted_bos;
	struct delayed_work restore_userptr_work;
	struct pid *pid;
	bool block_mmu_notifications;
};

int amdgpu_amdkfd_init(void);
void amdgpu_amdkfd_fini(void);

void amdgpu_amdkfd_suspend(struct amdgpu_device *adev, bool run_pm);
int amdgpu_amdkfd_resume(struct amdgpu_device *adev, bool run_pm);
void amdgpu_amdkfd_interrupt(struct amdgpu_device *adev,
			const void *ih_ring_entry);
void amdgpu_amdkfd_device_probe(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_init(struct amdgpu_device *adev);
void amdgpu_amdkfd_device_fini_sw(struct amdgpu_device *adev);
int amdgpu_amdkfd_check_and_lock_kfd(struct amdgpu_device *adev);
void amdgpu_amdkfd_unlock_kfd(struct amdgpu_device *adev);
int amdgpu_amdkfd_submit_ib(struct amdgpu_device *adev,
			    enum kgd_engine_type engine,
			    uint32_t vmid, uint64_t gpu_addr,
			    uint32_t *ib_cmd, uint32_t ib_len);
void amdgpu_amdkfd_set_compute_idle(struct amdgpu_device *adev, bool idle);
bool amdgpu_amdkfd_have_atomics_support(struct amdgpu_device *adev);
int amdgpu_amdkfd_flush_gpu_tlb_vmid(struct amdgpu_device *adev,
				     uint16_t vmid);
int amdgpu_amdkfd_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
				      uint16_t pasid,
				      enum TLB_FLUSH_TYPE flush_type,
				      uint32_t inst);
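/*
 * Usage sketch (illustrative only; "pasid" and "inst" are caller-provided).
 * A heavyweight flush is the strongest variant, used e.g. after memory has
 * been unmapped from a process:
 *
 *	int r = amdgpu_amdkfd_flush_gpu_tlb_pasid(adev, pasid,
 *						  TLB_FLUSH_HEAVYWEIGHT, inst);
 */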

bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);

int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);

int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);

void amdgpu_amdkfd_gpu_reset(struct amdgpu_device *adev);

int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
					      int queue_bit);

struct amdgpu_amdkfd_fence *amdgpu_amdkfd_fence_create(u64 context,
						struct mm_struct *mm,
						struct svm_range_bo *svm_bo);
#if defined(CONFIG_DEBUG_FS)
int kfd_debugfs_kfd_mem_limits(struct seq_file *m, void *data);
#endif
#if IS_ENABLED(CONFIG_HSA_AMD)
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm);
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f);
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo);
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem);
#else
static inline
bool amdkfd_fence_check_mm(struct dma_fence *f, struct mm_struct *mm)
{
	return false;
}

static inline
struct amdgpu_amdkfd_fence *to_amdgpu_amdkfd_fence(struct dma_fence *f)
{
	return NULL;
}

static inline
int amdgpu_amdkfd_remove_fence_on_pt_pd_bos(struct amdgpu_bo *bo)
{
	return 0;
}

static inline
int amdgpu_amdkfd_evict_userptr(struct mmu_interval_notifier *mni,
				unsigned long cur_seq, struct kgd_mem *mem)
{
	return 0;
}
#endif
/* Shared API */
int amdgpu_amdkfd_alloc_gtt_mem(struct amdgpu_device *adev, size_t size,
				void **mem_obj, uint64_t *gpu_addr,
				void **cpu_ptr, bool mqd_gfx9);
void amdgpu_amdkfd_free_gtt_mem(struct amdgpu_device *adev, void *mem_obj);
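/*
 * Usage sketch (illustrative only; "mqd_size" is a hypothetical caller
 * variable). Allocates GART-mapped memory visible to both the CPU and
 * the GPU, e.g. for an MQD; the final argument is the mqd_gfx9 flag:
 *
 *	void *mqd_obj;
 *	uint64_t mqd_gpu_addr;
 *	void *mqd_cpu_ptr;
 *	int r;
 *
 *	r = amdgpu_amdkfd_alloc_gtt_mem(adev, mqd_size, &mqd_obj,
 *					&mqd_gpu_addr, &mqd_cpu_ptr, true);
 *	...
 *	amdgpu_amdkfd_free_gtt_mem(adev, mqd_obj);
 */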
int amdgpu_amdkfd_alloc_gws(struct amdgpu_device *adev, size_t size,
			    void **mem_obj);
void amdgpu_amdkfd_free_gws(struct amdgpu_device *adev, void *mem_obj);
int amdgpu_amdkfd_add_gws_to_process(void *info, void *gws, struct kgd_mem **mem);
int amdgpu_amdkfd_remove_gws_from_process(void *info, void *mem);
uint32_t amdgpu_amdkfd_get_fw_version(struct amdgpu_device *adev,
				      enum kgd_engine_type type);
void amdgpu_amdkfd_get_local_mem_info(struct amdgpu_device *adev,
				      struct kfd_local_mem_info *mem_info,
				      struct amdgpu_xcp *xcp);
uint64_t amdgpu_amdkfd_get_gpu_clock_counter(struct amdgpu_device *adev);

uint32_t amdgpu_amdkfd_get_max_engine_clock_in_mhz(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_dmabuf_info(struct amdgpu_device *adev, int dma_buf_fd,
				  struct amdgpu_device **dmabuf_adev,
				  uint64_t *bo_size, void *metadata_buffer,
				  size_t buffer_size, uint32_t *metadata_size,
				  uint32_t *flags, int8_t *xcp_id);
uint8_t amdgpu_amdkfd_get_xgmi_hops_count(struct amdgpu_device *dst,
					  struct amdgpu_device *src);
int amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(struct amdgpu_device *dst,
					    struct amdgpu_device *src,
					    bool is_min);
int amdgpu_amdkfd_get_pcie_bandwidth_mbytes(struct amdgpu_device *adev, bool is_min);
int amdgpu_amdkfd_send_close_event_drain_irq(struct amdgpu_device *adev,
					     uint32_t *payload);
int amdgpu_amdkfd_unmap_hiq(struct amdgpu_device *adev, u32 doorbell_off,
			    u32 inst);

/* Read user wptr from a specified user address space with page fault
 * disabled. The memory must be pinned and mapped to the hardware when
 * this is called in hqd_load functions, so it should never fault in
 * the first place. This resolves a circular lock dependency involving
 * four locks, including the DQM lock and mmap_lock.
 */
#define read_user_wptr(mmptr, wptr, dst)			\
	({							\
		bool valid = false;				\
		if ((mmptr) && (wptr)) {			\
			pagefault_disable();			\
			if ((mmptr) == current->mm) {		\
				valid = !get_user((dst), (wptr)); \
			} else if (current->flags & PF_KTHREAD) { \
				kthread_use_mm(mmptr);		\
				valid = !get_user((dst), (wptr)); \
				kthread_unuse_mm(mmptr);	\
			}					\
			pagefault_enable();			\
		}						\
		valid;						\
	})
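/*
 * Usage sketch (illustrative only; "mm", "wptr" and the HQD-programming
 * helper are hypothetical names from an hqd_load implementation):
 *
 *	uint32_t wptr_val;
 *
 *	if (read_user_wptr(mm, wptr, wptr_val))
 *		program_hqd_wptr(adev, wptr_val);
 *
 * Note that "dst" is assigned by get_user(), so it must be an lvalue.
 */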

/* GPUVM API */
#define drm_priv_to_vm(drm_priv)				\
	(&((struct amdgpu_fpriv *)				\
		((struct drm_file *)(drm_priv))->driver_priv)->vm)
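/*
 * Example (sketch): "drm_priv" is the opaque per-process handle that the
 * GPUVM functions below take; the macro recovers the underlying
 * amdgpu_vm from it:
 *
 *	struct amdgpu_vm *avm = drm_priv_to_vm(drm_priv);
 */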

int amdgpu_amdkfd_gpuvm_set_vm_pasid(struct amdgpu_device *adev,
				     struct amdgpu_vm *avm, u32 pasid);
int amdgpu_amdkfd_gpuvm_acquire_process_vm(struct amdgpu_device *adev,
					   struct amdgpu_vm *avm,
					   void **process_info,
					   struct dma_fence **ef);
void amdgpu_amdkfd_gpuvm_release_process_vm(struct amdgpu_device *adev,
					    void *drm_priv);
uint64_t amdgpu_amdkfd_gpuvm_get_process_page_dir(void *drm_priv);
size_t amdgpu_amdkfd_get_available_memory(struct amdgpu_device *adev,
					  uint8_t xcp_id);
int amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(
		struct amdgpu_device *adev, uint64_t va, uint64_t size,
		void *drm_priv, struct kgd_mem **mem,
		uint64_t *offset, uint32_t flags, bool criu_resume);
int amdgpu_amdkfd_gpuvm_free_memory_of_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv,
		uint64_t *size);
int amdgpu_amdkfd_gpuvm_map_memory_to_gpu(struct amdgpu_device *adev,
					  struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_unmap_memory_from_gpu(
		struct amdgpu_device *adev, struct kgd_mem *mem, void *drm_priv);
void amdgpu_amdkfd_gpuvm_dmaunmap_mem(struct kgd_mem *mem, void *drm_priv);
int amdgpu_amdkfd_gpuvm_sync_memory(
		struct amdgpu_device *adev, struct kgd_mem *mem, bool intr);
int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_mem *mem,
					     void **kptr, uint64_t *size);
void amdgpu_amdkfd_gpuvm_unmap_gtt_bo_from_kernel(struct kgd_mem *mem);
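/*
 * Typical allocation flow (sketch; error handling abridged, "drm_priv"
 * is the caller's DRM file private data as used by drm_priv_to_vm()
 * above, and the VRAM flag shown is one possible choice):
 *
 *	struct kgd_mem *mem;
 *	uint64_t offset;
 *	int r;
 *
 *	r = amdgpu_amdkfd_gpuvm_alloc_memory_of_gpu(adev, va, size,
 *			drm_priv, &mem, &offset,
 *			KFD_IOC_ALLOC_MEM_FLAGS_VRAM, false);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_map_memory_to_gpu(adev, mem, drm_priv);
 *	if (!r)
 *		r = amdgpu_amdkfd_gpuvm_sync_memory(adev, mem, true);
 */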

int amdgpu_amdkfd_map_gtt_bo_to_gart(struct amdgpu_device *adev, struct amdgpu_bo *bo);

int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
					    struct dma_fence **ef);
int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct amdgpu_device *adev,
					  struct kfd_vm_fault_info *info);
int amdgpu_amdkfd_gpuvm_import_dmabuf(struct amdgpu_device *adev,
				      struct dma_buf *dmabuf,
				      uint64_t va, void *drm_priv,
				      struct kgd_mem **mem, uint64_t *size,
				      uint64_t *mmap_offset);
int amdgpu_amdkfd_gpuvm_export_dmabuf(struct kgd_mem *mem,
				      struct dma_buf **dmabuf);
void amdgpu_amdkfd_debug_mem_fence(struct amdgpu_device *adev);
int amdgpu_amdkfd_get_tile_config(struct amdgpu_device *adev,
				  struct tile_config *config);
void amdgpu_amdkfd_ras_poison_consumption_handler(struct amdgpu_device *adev,
						  bool reset);
bool amdgpu_amdkfd_bo_mapped_to_dev(struct amdgpu_device *adev, struct kgd_mem *mem);
void amdgpu_amdkfd_block_mmu_notifications(void *p);
int amdgpu_amdkfd_criu_resume(void *p);
bool amdgpu_amdkfd_ras_query_utcl2_poison_status(struct amdgpu_device *adev);
int amdgpu_amdkfd_reserve_mem_limit(struct amdgpu_device *adev,
				    uint64_t size, u32 alloc_flag, int8_t xcp_id);
void amdgpu_amdkfd_unreserve_mem_limit(struct amdgpu_device *adev,
				       uint64_t size, u32 alloc_flag, int8_t xcp_id);

u64 amdgpu_amdkfd_xcp_memory_size(struct amdgpu_device *adev, int xcp_id);

#define KFD_XCP_MEM_ID(adev, xcp_id) \
	((adev)->xcp_mgr && (xcp_id) >= 0 ? \
		(adev)->xcp_mgr->xcp[(xcp_id)].mem_id : -1)

#define KFD_XCP_MEMORY_SIZE(adev, xcp_id) amdgpu_amdkfd_xcp_memory_size((adev), (xcp_id))
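/*
 * Usage sketch ("xcp_id" supplied by the caller; KFD_XCP_MEM_ID()
 * evaluates to -1 when the device is not partitioned):
 *
 *	int mem_id = KFD_XCP_MEM_ID(adev, xcp_id);
 *	u64 partition_size = KFD_XCP_MEMORY_SIZE(adev, xcp_id);
 */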

#if IS_ENABLED(CONFIG_HSA_AMD)
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm);

/**
 * amdgpu_amdkfd_release_notify() - Notify KFD when a GEM object is released
 * @bo: Buffer object being released
 *
 * Allows KFD to release its resources associated with the GEM object.
 */
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo);
void amdgpu_amdkfd_reserve_system_mem(uint64_t size);
#else
static inline
void amdgpu_amdkfd_gpuvm_init_mem_limits(void)
{
}

static inline
void amdgpu_amdkfd_gpuvm_destroy_cb(struct amdgpu_device *adev,
				    struct amdgpu_vm *vm)
{
}

static inline
void amdgpu_amdkfd_release_notify(struct amdgpu_bo *bo)
{
}
#endif

#if IS_ENABLED(CONFIG_HSA_AMD_SVM)
int kgd2kfd_init_zone_device(struct amdgpu_device *adev);
#else
static inline
int kgd2kfd_init_zone_device(struct amdgpu_device *adev)
{
	return 0;
}
#endif

/* KGD2KFD callbacks */
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger);
int kgd2kfd_resume_mm(struct mm_struct *mm);
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence);
#if IS_ENABLED(CONFIG_HSA_AMD)
int kgd2kfd_init(void);
void kgd2kfd_exit(void);
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf);
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources);
void kgd2kfd_device_exit(struct kfd_dev *kfd);
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm);
int kgd2kfd_pre_reset(struct kfd_dev *kfd);
int kgd2kfd_post_reset(struct kfd_dev *kfd);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd);
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask);
int kgd2kfd_check_and_lock_kfd(void);
void kgd2kfd_unlock_kfd(void);
#else
static inline int kgd2kfd_init(void)
{
	return -ENOENT;
}

static inline void kgd2kfd_exit(void)
{
}

static inline
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	return NULL;
}

static inline
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	return false;
}

static inline void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
}

static inline void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
}

static inline int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	return 0;
}

static inline int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	return 0;
}

static inline
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
}

static inline
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
}

static inline
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
}

static inline int kgd2kfd_check_and_lock_kfd(void)
{
	return 0;
}

static inline void kgd2kfd_unlock_kfd(void)
{
}
#endif
#endif /* AMDGPU_AMDKFD_H_INCLUDED */