1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * Authors: Christian König |
23 | */ |
24 | #ifndef __AMDGPU_VM_H__ |
25 | #define __AMDGPU_VM_H__ |
26 | |
27 | #include <linux/idr.h> |
28 | #include <linux/kfifo.h> |
29 | #include <linux/rbtree.h> |
30 | #include <drm/gpu_scheduler.h> |
31 | #include <drm/drm_file.h> |
32 | #include <drm/ttm/ttm_bo.h> |
33 | #include <linux/sched/mm.h> |
34 | |
35 | #include "amdgpu_sync.h" |
36 | #include "amdgpu_ring.h" |
37 | #include "amdgpu_ids.h" |
38 | |
39 | struct drm_exec; |
40 | |
41 | struct amdgpu_bo_va; |
42 | struct amdgpu_job; |
43 | struct amdgpu_bo_list_entry; |
44 | struct amdgpu_bo_vm; |
45 | struct amdgpu_mem_stats; |
46 | |
47 | /* |
48 | * GPUVM handling |
49 | */ |
50 | |
51 | /* Maximum number of PTEs the hardware can write with one command */ |
52 | #define AMDGPU_VM_MAX_UPDATE_SIZE 0x3FFFF |
53 | |
54 | /* number of entries in page table */ |
55 | #define AMDGPU_VM_PTE_COUNT(adev) (1 << (adev)->vm_manager.block_size) |
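
/* Worked example (illustrative): with a block_size of 9,
 * AMDGPU_VM_PTE_COUNT(adev) is 1 << 9 = 512 entries, so a single page
 * table block covers 512 * 4KB = 2MB of address space.
 */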
56 | |
57 | #define AMDGPU_PTE_VALID (1ULL << 0) |
58 | #define AMDGPU_PTE_SYSTEM (1ULL << 1) |
59 | #define AMDGPU_PTE_SNOOPED (1ULL << 2) |
60 | |
61 | /* RV+ */ |
62 | #define AMDGPU_PTE_TMZ (1ULL << 3) |
63 | |
64 | /* VI only */ |
65 | #define AMDGPU_PTE_EXECUTABLE (1ULL << 4) |
66 | |
67 | #define AMDGPU_PTE_READABLE (1ULL << 5) |
68 | #define AMDGPU_PTE_WRITEABLE (1ULL << 6) |
69 | |
#define AMDGPU_PTE_FRAG(x)	(((x) & 0x1fULL) << 7)
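
/* Encoding sketch (illustrative): the 5-bit fragment field holds the log2
 * of the fragment size in 4KB units, i.e. a fragment spans 4KB << x bytes:
 *
 *	AMDGPU_PTE_FRAG(4)	fragment size 64KB (4KB << 4)
 *	AMDGPU_PTE_FRAG(9)	fragment size 2MB (4KB << 9)
 */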
71 | |
72 | /* TILED for VEGA10, reserved for older ASICs */ |
73 | #define AMDGPU_PTE_PRT (1ULL << 51) |
74 | |
75 | /* PDE is handled as PTE for VEGA10 */ |
76 | #define AMDGPU_PDE_PTE (1ULL << 54) |
77 | |
78 | #define AMDGPU_PTE_LOG (1ULL << 55) |
79 | |
80 | /* PTE is handled as PDE for VEGA10 (Translate Further) */ |
81 | #define AMDGPU_PTE_TF (1ULL << 56) |
82 | |
83 | /* MALL noalloc for sienna_cichlid, reserved for older ASICs */ |
84 | #define AMDGPU_PTE_NOALLOC (1ULL << 58) |
85 | |
86 | /* PDE Block Fragment Size for VEGA10 */ |
#define AMDGPU_PDE_BFS(a)	((uint64_t)(a) << 59)
88 | |
89 | /* Flag combination to set no-retry with TF disabled */ |
90 | #define AMDGPU_VM_NORETRY_FLAGS (AMDGPU_PTE_EXECUTABLE | AMDGPU_PDE_PTE | \ |
91 | AMDGPU_PTE_TF) |
92 | |
93 | /* Flag combination to set no-retry with TF enabled */ |
94 | #define AMDGPU_VM_NORETRY_FLAGS_TF (AMDGPU_PTE_VALID | AMDGPU_PTE_SYSTEM | \ |
95 | AMDGPU_PTE_PRT) |
96 | /* For GFX9 */ |
97 | #define AMDGPU_PTE_MTYPE_VG10(a) ((uint64_t)(a) << 57) |
98 | #define AMDGPU_PTE_MTYPE_VG10_MASK AMDGPU_PTE_MTYPE_VG10(3ULL) |
99 | |
100 | #define AMDGPU_MTYPE_NC 0 |
101 | #define AMDGPU_MTYPE_CC 2 |
102 | |
103 | #define AMDGPU_PTE_DEFAULT_ATC (AMDGPU_PTE_SYSTEM \ |
104 | | AMDGPU_PTE_SNOOPED \ |
105 | | AMDGPU_PTE_EXECUTABLE \ |
106 | | AMDGPU_PTE_READABLE \ |
107 | | AMDGPU_PTE_WRITEABLE \ |
108 | | AMDGPU_PTE_MTYPE_VG10(AMDGPU_MTYPE_CC)) |
109 | |
110 | /* gfx10 */ |
111 | #define AMDGPU_PTE_MTYPE_NV10(a) ((uint64_t)(a) << 48) |
112 | #define AMDGPU_PTE_MTYPE_NV10_MASK AMDGPU_PTE_MTYPE_NV10(7ULL) |
113 | |
114 | /* How to program VM fault handling */ |
115 | #define AMDGPU_VM_FAULT_STOP_NEVER 0 |
116 | #define AMDGPU_VM_FAULT_STOP_FIRST 1 |
117 | #define AMDGPU_VM_FAULT_STOP_ALWAYS 2 |
118 | |
/* Reserve 8MB VRAM for page tables */
120 | #define AMDGPU_VM_RESERVED_VRAM (8ULL << 20) |
121 | |
122 | /* |
123 | * max number of VMHUB |
124 | * layout: max 8 GFXHUB + 4 MMHUB0 + 1 MMHUB1 |
125 | */ |
126 | #define AMDGPU_MAX_VMHUBS 13 |
127 | #define AMDGPU_GFXHUB_START 0 |
128 | #define AMDGPU_MMHUB0_START 8 |
129 | #define AMDGPU_MMHUB1_START 12 |
130 | #define AMDGPU_GFXHUB(x) (AMDGPU_GFXHUB_START + (x)) |
131 | #define AMDGPU_MMHUB0(x) (AMDGPU_MMHUB0_START + (x)) |
132 | #define AMDGPU_MMHUB1(x) (AMDGPU_MMHUB1_START + (x)) |
133 | |
134 | #define AMDGPU_IS_GFXHUB(x) ((x) >= AMDGPU_GFXHUB_START && (x) < AMDGPU_MMHUB0_START) |
135 | #define AMDGPU_IS_MMHUB0(x) ((x) >= AMDGPU_MMHUB0_START && (x) < AMDGPU_MMHUB1_START) |
136 | #define AMDGPU_IS_MMHUB1(x) ((x) >= AMDGPU_MMHUB1_START && (x) < AMDGPU_MAX_VMHUBS) |
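
/* Worked example (illustrative) for the index layout above:
 *
 *	AMDGPU_GFXHUB(0) == 0
 *	AMDGPU_MMHUB0(1) == 9
 *	AMDGPU_MMHUB1(0) == 12
 *
 * so AMDGPU_IS_MMHUB0(9) is true while AMDGPU_IS_GFXHUB(9) is false.
 */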
137 | |
138 | /* Reserve 2MB at top/bottom of address space for kernel use */ |
139 | #define AMDGPU_VA_RESERVED_SIZE (2ULL << 20) |
140 | |
141 | /* See vm_update_mode */ |
142 | #define AMDGPU_VM_USE_CPU_FOR_GFX (1 << 0) |
143 | #define AMDGPU_VM_USE_CPU_FOR_COMPUTE (1 << 1) |
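
/* Usage sketch (illustrative, mirrors the vm_update_mode field in struct
 * amdgpu_vm_manager below): GFX page tables are updated by the CPU when
 *
 *	adev->vm_manager.vm_update_mode & AMDGPU_VM_USE_CPU_FOR_GFX
 *
 * is set, and by SDMA otherwise; AMDGPU_VM_USE_CPU_FOR_COMPUTE is tested
 * the same way for compute VMs.
 */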
144 | |
/* VMPT level enumeration; the hierarchy is:
 * PDB2->PDB1->PDB0->PTB
 */
148 | enum amdgpu_vm_level { |
149 | AMDGPU_VM_PDB2, |
150 | AMDGPU_VM_PDB1, |
151 | AMDGPU_VM_PDB0, |
152 | AMDGPU_VM_PTB |
153 | }; |
154 | |
155 | /* base structure for tracking BO usage in a VM */ |
156 | struct amdgpu_vm_bo_base { |
157 | /* constant after initialization */ |
158 | struct amdgpu_vm *vm; |
159 | struct amdgpu_bo *bo; |
160 | |
161 | /* protected by bo being reserved */ |
162 | struct amdgpu_vm_bo_base *next; |
163 | |
164 | /* protected by spinlock */ |
165 | struct list_head vm_status; |
166 | |
167 | /* protected by the BO being reserved */ |
168 | bool moved; |
169 | }; |
170 | |
171 | /* provided by hw blocks that can write ptes, e.g., sdma */ |
172 | struct amdgpu_vm_pte_funcs { |
173 | /* number of dw to reserve per operation */ |
174 | unsigned copy_pte_num_dw; |
175 | |
176 | /* copy pte entries from GART */ |
177 | void (*copy_pte)(struct amdgpu_ib *ib, |
178 | uint64_t pe, uint64_t src, |
179 | unsigned count); |
180 | |
181 | /* write pte one entry at a time with addr mapping */ |
182 | void (*write_pte)(struct amdgpu_ib *ib, uint64_t pe, |
183 | uint64_t value, unsigned count, |
184 | uint32_t incr); |
185 | /* for linear pte/pde updates without addr mapping */ |
186 | void (*set_pte_pde)(struct amdgpu_ib *ib, |
187 | uint64_t pe, |
188 | uint64_t addr, unsigned count, |
189 | uint32_t incr, uint64_t flags); |
190 | }; |
191 | |
192 | struct amdgpu_task_info { |
193 | char process_name[TASK_COMM_LEN]; |
194 | char task_name[TASK_COMM_LEN]; |
195 | pid_t pid; |
196 | pid_t tgid; |
197 | }; |
198 | |
199 | /** |
200 | * struct amdgpu_vm_update_params |
201 | * |
202 | * Encapsulate some VM table update parameters to reduce |
203 | * the number of function parameters |
204 | * |
205 | */ |
206 | struct amdgpu_vm_update_params { |
207 | |
208 | /** |
209 | * @adev: amdgpu device we do this update for |
210 | */ |
211 | struct amdgpu_device *adev; |
212 | |
213 | /** |
214 | * @vm: optional amdgpu_vm we do this update for |
215 | */ |
216 | struct amdgpu_vm *vm; |
217 | |
218 | /** |
219 | * @immediate: if changes should be made immediately |
220 | */ |
221 | bool immediate; |
222 | |
223 | /** |
224 | * @unlocked: true if the root BO is not locked |
225 | */ |
226 | bool unlocked; |
227 | |
228 | /** |
229 | * @pages_addr: |
230 | * |
231 | * DMA addresses to use for mapping |
232 | */ |
233 | dma_addr_t *pages_addr; |
234 | |
235 | /** |
	 * @job: job to use for the hw submission
237 | */ |
238 | struct amdgpu_job *job; |
239 | |
240 | /** |
241 | * @num_dw_left: number of dw left for the IB |
242 | */ |
243 | unsigned int num_dw_left; |
244 | |
245 | /** |
	 * @table_freed: set to true if a page table was freed during the update
247 | */ |
248 | bool table_freed; |
249 | |
250 | /** |
251 | * @allow_override: true for memory that is not uncached: allows MTYPE |
252 | * to be overridden for NUMA local memory. |
253 | */ |
254 | bool allow_override; |
255 | }; |
256 | |
257 | struct amdgpu_vm_update_funcs { |
258 | int (*map_table)(struct amdgpu_bo_vm *bo); |
259 | int (*prepare)(struct amdgpu_vm_update_params *p, struct dma_resv *resv, |
260 | enum amdgpu_sync_mode sync_mode); |
261 | int (*update)(struct amdgpu_vm_update_params *p, |
262 | struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, |
263 | unsigned count, uint32_t incr, uint64_t flags); |
264 | int (*commit)(struct amdgpu_vm_update_params *p, |
265 | struct dma_fence **fence); |
266 | }; |
267 | |
268 | struct amdgpu_vm_fault_info { |
269 | /* fault address */ |
270 | uint64_t addr; |
271 | /* fault status register */ |
272 | uint32_t status; |
273 | /* which vmhub? gfxhub, mmhub, etc. */ |
274 | unsigned int vmhub; |
275 | }; |
276 | |
277 | struct amdgpu_vm { |
278 | /* tree of virtual addresses mapped */ |
279 | struct rb_root_cached va; |
280 | |
	/* Lock to prevent eviction while we are updating page tables;
	 * use amdgpu_vm_eviction_lock/unlock(vm)
	 */
284 | struct mutex eviction_lock; |
285 | bool evicting; |
286 | unsigned int saved_flags; |
287 | |
288 | /* Lock to protect vm_bo add/del/move on all lists of vm */ |
289 | spinlock_t status_lock; |
290 | |
	/* BOs that need validation */
292 | struct list_head evicted; |
293 | |
	/* PT BOs which have been relocated and whose parent needs an update */
295 | struct list_head relocated; |
296 | |
297 | /* per VM BOs moved, but not yet updated in the PT */ |
298 | struct list_head moved; |
299 | |
300 | /* All BOs of this VM not currently in the state machine */ |
301 | struct list_head idle; |
302 | |
303 | /* regular invalidated BOs, but not yet updated in the PT */ |
304 | struct list_head invalidated; |
305 | |
306 | /* BO mappings freed, but not yet updated in the PT */ |
307 | struct list_head freed; |
308 | |
	/* BOs which were invalidated but have already been updated in the PTs */
310 | struct list_head done; |
311 | |
	/* PT BOs scheduled to be freed and zero-filled when the VM reservation
	 * is not held
	 */
313 | struct list_head pt_freed; |
314 | struct work_struct pt_free_work; |
315 | |
316 | /* contains the page directory */ |
317 | struct amdgpu_vm_bo_base root; |
318 | struct dma_fence *last_update; |
319 | |
320 | /* Scheduler entities for page table updates */ |
321 | struct drm_sched_entity immediate; |
322 | struct drm_sched_entity delayed; |
323 | |
324 | /* Last finished delayed update */ |
325 | atomic64_t tlb_seq; |
326 | struct dma_fence *last_tlb_flush; |
327 | |
328 | /* How many times we had to re-generate the page tables */ |
329 | uint64_t generation; |
330 | |
331 | /* Last unlocked submission to the scheduler entities */ |
332 | struct dma_fence *last_unlocked; |
333 | |
334 | unsigned int pasid; |
335 | bool reserved_vmid[AMDGPU_MAX_VMHUBS]; |
336 | |
337 | /* Flag to indicate if VM tables are updated by CPU or GPU (SDMA) */ |
338 | bool use_cpu_for_update; |
339 | |
340 | /* Functions to use for VM table updates */ |
341 | const struct amdgpu_vm_update_funcs *update_funcs; |
342 | |
343 | /* Flag to indicate ATS support from PTE for GFX9 */ |
344 | bool pte_support_ats; |
345 | |
346 | /* Up to 128 pending retry page faults */ |
347 | DECLARE_KFIFO(faults, u64, 128); |
348 | |
349 | /* Points to the KFD process VM info */ |
350 | struct amdkfd_process_info *process_info; |
351 | |
352 | /* List node in amdkfd_process_info.vm_list_head */ |
353 | struct list_head vm_list_node; |
354 | |
355 | /* Valid while the PD is reserved or fenced */ |
356 | uint64_t pd_phys_addr; |
357 | |
358 | /* Some basic info about the task */ |
359 | struct amdgpu_task_info task_info; |
360 | |
	/* Stores the LRU positions of a group of BOs for bulk moves */
362 | struct ttm_lru_bulk_move lru_bulk_move; |
363 | /* Flag to indicate if VM is used for compute */ |
364 | bool is_compute_context; |
365 | |
366 | /* Memory partition number, -1 means any partition */ |
367 | int8_t mem_id; |
368 | |
369 | /* cached fault info */ |
370 | struct amdgpu_vm_fault_info fault_info; |
371 | }; |
372 | |
373 | struct amdgpu_vm_manager { |
374 | /* Handling of VMIDs */ |
375 | struct amdgpu_vmid_mgr id_mgr[AMDGPU_MAX_VMHUBS]; |
376 | unsigned int first_kfd_vmid; |
377 | bool concurrent_flush; |
378 | |
379 | /* Handling of VM fences */ |
380 | u64 fence_context; |
381 | unsigned seqno[AMDGPU_MAX_RINGS]; |
382 | |
383 | uint64_t max_pfn; |
384 | uint32_t num_level; |
385 | uint32_t block_size; |
386 | uint32_t fragment_size; |
387 | enum amdgpu_vm_level root_level; |
388 | /* vram base address for page table entry */ |
389 | u64 vram_base_offset; |
390 | /* vm pte handling */ |
391 | const struct amdgpu_vm_pte_funcs *vm_pte_funcs; |
392 | struct drm_gpu_scheduler *vm_pte_scheds[AMDGPU_MAX_RINGS]; |
393 | unsigned vm_pte_num_scheds; |
394 | struct amdgpu_ring *page_fault; |
395 | |
396 | /* partial resident texture handling */ |
397 | spinlock_t prt_lock; |
398 | atomic_t num_prt_users; |
399 | |
400 | /* controls how VM page tables are updated for Graphics and Compute. |
401 | * BIT0[= 0] Graphics updated by SDMA [= 1] by CPU |
402 | * BIT1[= 0] Compute updated by SDMA [= 1] by CPU |
403 | */ |
404 | int vm_update_mode; |
405 | |
406 | /* PASID to VM mapping, will be used in interrupt context to |
407 | * look up VM of a page fault |
408 | */ |
409 | struct xarray pasids; |
410 | }; |
411 | |
412 | struct amdgpu_bo_va_mapping; |
413 | |
414 | #define amdgpu_vm_copy_pte(adev, ib, pe, src, count) ((adev)->vm_manager.vm_pte_funcs->copy_pte((ib), (pe), (src), (count))) |
415 | #define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr))) |
416 | #define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags))) |
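
/* Dispatch sketch (illustrative; all values are placeholders): these
 * wrappers simply forward to the per-ASIC vm_pte_funcs, e.g. writing
 * count PTEs starting at GPU address pe, incrementing addr by incr for
 * each entry:
 *
 *	amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr,
 *			      AMDGPU_PTE_VALID | AMDGPU_PTE_READABLE |
 *			      AMDGPU_PTE_WRITEABLE);
 */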
417 | |
418 | extern const struct amdgpu_vm_update_funcs amdgpu_vm_cpu_funcs; |
419 | extern const struct amdgpu_vm_update_funcs amdgpu_vm_sdma_funcs; |
420 | |
421 | void amdgpu_vm_manager_init(struct amdgpu_device *adev); |
422 | void amdgpu_vm_manager_fini(struct amdgpu_device *adev); |
423 | |
424 | int amdgpu_vm_set_pasid(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
425 | u32 pasid); |
426 | |
427 | long amdgpu_vm_wait_idle(struct amdgpu_vm *vm, long timeout); |
428 | int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm, int32_t xcp_id); |
429 | int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); |
430 | void amdgpu_vm_release_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm); |
431 | void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm); |
432 | int amdgpu_vm_lock_pd(struct amdgpu_vm *vm, struct drm_exec *exec, |
433 | unsigned int num_fences); |
434 | bool amdgpu_vm_ready(struct amdgpu_vm *vm); |
435 | uint64_t amdgpu_vm_generation(struct amdgpu_device *adev, struct amdgpu_vm *vm); |
436 | int amdgpu_vm_validate_pt_bos(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
437 | int (*callback)(void *p, struct amdgpu_bo *bo), |
438 | void *param); |
439 | int amdgpu_vm_flush(struct amdgpu_ring *ring, struct amdgpu_job *job, bool need_pipe_sync); |
440 | int amdgpu_vm_update_pdes(struct amdgpu_device *adev, |
441 | struct amdgpu_vm *vm, bool immediate); |
442 | int amdgpu_vm_clear_freed(struct amdgpu_device *adev, |
443 | struct amdgpu_vm *vm, |
444 | struct dma_fence **fence); |
445 | int amdgpu_vm_handle_moved(struct amdgpu_device *adev, |
446 | struct amdgpu_vm *vm, |
447 | struct ww_acquire_ctx *ticket); |
448 | void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base, |
449 | struct amdgpu_vm *vm, struct amdgpu_bo *bo); |
450 | int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
451 | bool immediate, bool unlocked, bool flush_tlb, bool allow_override, |
452 | struct dma_resv *resv, uint64_t start, uint64_t last, |
453 | uint64_t flags, uint64_t offset, uint64_t vram_base, |
454 | struct ttm_resource *res, dma_addr_t *pages_addr, |
455 | struct dma_fence **fence); |
456 | int amdgpu_vm_bo_update(struct amdgpu_device *adev, |
457 | struct amdgpu_bo_va *bo_va, |
458 | bool clear); |
459 | bool amdgpu_vm_evictable(struct amdgpu_bo *bo); |
460 | void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev, |
461 | struct amdgpu_bo *bo, bool evicted); |
462 | uint64_t amdgpu_vm_map_gart(const dma_addr_t *pages_addr, uint64_t addr); |
463 | struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm, |
464 | struct amdgpu_bo *bo); |
465 | struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev, |
466 | struct amdgpu_vm *vm, |
467 | struct amdgpu_bo *bo); |
468 | int amdgpu_vm_bo_map(struct amdgpu_device *adev, |
469 | struct amdgpu_bo_va *bo_va, |
470 | uint64_t addr, uint64_t offset, |
471 | uint64_t size, uint64_t flags); |
472 | int amdgpu_vm_bo_replace_map(struct amdgpu_device *adev, |
473 | struct amdgpu_bo_va *bo_va, |
474 | uint64_t addr, uint64_t offset, |
475 | uint64_t size, uint64_t flags); |
476 | int amdgpu_vm_bo_unmap(struct amdgpu_device *adev, |
477 | struct amdgpu_bo_va *bo_va, |
478 | uint64_t addr); |
479 | int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev, |
480 | struct amdgpu_vm *vm, |
481 | uint64_t saddr, uint64_t size); |
482 | struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm, |
483 | uint64_t addr); |
484 | void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket); |
485 | void amdgpu_vm_bo_del(struct amdgpu_device *adev, |
486 | struct amdgpu_bo_va *bo_va); |
487 | void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size, |
488 | uint32_t fragment_size_default, unsigned max_level, |
489 | unsigned max_bits); |
490 | int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp); |
491 | bool amdgpu_vm_need_pipeline_sync(struct amdgpu_ring *ring, |
492 | struct amdgpu_job *job); |
493 | void amdgpu_vm_check_compute_bug(struct amdgpu_device *adev); |
494 | |
495 | void amdgpu_vm_get_task_info(struct amdgpu_device *adev, u32 pasid, |
496 | struct amdgpu_task_info *task_info); |
497 | bool amdgpu_vm_handle_fault(struct amdgpu_device *adev, u32 pasid, |
498 | u32 vmid, u32 node_id, uint64_t addr, |
499 | bool write_fault); |
500 | |
501 | void amdgpu_vm_set_task_info(struct amdgpu_vm *vm); |
502 | |
503 | void amdgpu_vm_move_to_lru_tail(struct amdgpu_device *adev, |
504 | struct amdgpu_vm *vm); |
505 | void amdgpu_vm_get_memory(struct amdgpu_vm *vm, |
506 | struct amdgpu_mem_stats *stats); |
507 | |
508 | int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
509 | struct amdgpu_bo_vm *vmbo, bool immediate); |
510 | int amdgpu_vm_pt_create(struct amdgpu_device *adev, struct amdgpu_vm *vm, |
511 | int level, bool immediate, struct amdgpu_bo_vm **vmbo, |
512 | int32_t xcp_id); |
513 | void amdgpu_vm_pt_free_root(struct amdgpu_device *adev, struct amdgpu_vm *vm); |
514 | bool amdgpu_vm_pt_is_root_clean(struct amdgpu_device *adev, |
515 | struct amdgpu_vm *vm); |
516 | |
517 | int amdgpu_vm_pde_update(struct amdgpu_vm_update_params *params, |
518 | struct amdgpu_vm_bo_base *entry); |
519 | int amdgpu_vm_ptes_update(struct amdgpu_vm_update_params *params, |
520 | uint64_t start, uint64_t end, |
521 | uint64_t dst, uint64_t flags); |
522 | void amdgpu_vm_pt_free_work(struct work_struct *work); |
523 | |
524 | #if defined(CONFIG_DEBUG_FS) |
525 | void amdgpu_debugfs_vm_bo_info(struct amdgpu_vm *vm, struct seq_file *m); |
526 | #endif |
527 | |
528 | int amdgpu_vm_pt_map_tables(struct amdgpu_device *adev, struct amdgpu_vm *vm); |
529 | |
530 | /** |
531 | * amdgpu_vm_tlb_seq - return tlb flush sequence number |
532 | * @vm: the amdgpu_vm structure to query |
533 | * |
 * Returns the TLB flush sequence number, which indicates that the VM TLBs
 * need to be invalidated whenever the sequence number changes.
536 | */ |
537 | static inline uint64_t amdgpu_vm_tlb_seq(struct amdgpu_vm *vm) |
538 | { |
539 | unsigned long flags; |
540 | spinlock_t *lock; |
541 | |
542 | /* |
543 | * Workaround to stop racing between the fence signaling and handling |
544 | * the cb. The lock is static after initially setting it up, just make |
545 | * sure that the dma_fence structure isn't freed up. |
546 | */ |
547 | rcu_read_lock(); |
548 | lock = vm->last_tlb_flush->lock; |
549 | rcu_read_unlock(); |
550 | |
551 | spin_lock_irqsave(lock, flags); |
552 | spin_unlock_irqrestore(lock, flags); |
553 | |
	return atomic64_read(&vm->tlb_seq);
555 | } |
556 | |
557 | /* |
558 | * vm eviction_lock can be taken in MMU notifiers. Make sure no reclaim-FS |
559 | * happens while holding this lock anywhere to prevent deadlocks when |
560 | * an MMU notifier runs in reclaim-FS context. |
561 | */ |
562 | static inline void amdgpu_vm_eviction_lock(struct amdgpu_vm *vm) |
563 | { |
564 | mutex_lock(&vm->eviction_lock); |
565 | vm->saved_flags = memalloc_noreclaim_save(); |
566 | } |
567 | |
568 | static inline bool amdgpu_vm_eviction_trylock(struct amdgpu_vm *vm) |
569 | { |
	if (mutex_trylock(&vm->eviction_lock)) {
571 | vm->saved_flags = memalloc_noreclaim_save(); |
572 | return true; |
573 | } |
574 | return false; |
575 | } |
576 | |
577 | static inline void amdgpu_vm_eviction_unlock(struct amdgpu_vm *vm) |
578 | { |
	memalloc_noreclaim_restore(vm->saved_flags);
	mutex_unlock(&vm->eviction_lock);
581 | } |
582 | |
583 | void amdgpu_vm_update_fault_cache(struct amdgpu_device *adev, |
584 | unsigned int pasid, |
585 | uint64_t addr, |
586 | uint32_t status, |
587 | unsigned int vmhub); |
588 | |
589 | #endif |
590 | |