1 | /* SPDX-License-Identifier: GPL-2.0 OR MIT */ |
2 | /* |
3 | * Copyright 2020-2021 Advanced Micro Devices, Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | */ |
24 | |
25 | #ifndef KFD_SVM_H_ |
26 | #define KFD_SVM_H_ |
27 | |
28 | #if IS_ENABLED(CONFIG_HSA_AMD_SVM) |
29 | |
30 | #include <linux/rwsem.h> |
31 | #include <linux/list.h> |
32 | #include <linux/mutex.h> |
33 | #include <linux/sched/mm.h> |
34 | #include <linux/hmm.h> |
35 | #include "amdgpu.h" |
36 | #include "kfd_priv.h" |
37 | |
38 | #define SVM_RANGE_VRAM_DOMAIN (1UL << 0) |
#define SVM_ADEV_PGMAP_OWNER(adev) \
			((adev)->hive ? (void *)(adev)->hive : (void *)(adev))
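
/*
 * The pgmap owner cookie identifies which devices may access each other's
 * device-private (VRAM) pages in place: GPUs in the same XGMI hive share
 * the hive as owner, otherwise each adev is its own owner.
 *
 * Illustrative sketch (assumed call site, not declared in this header):
 * HMM compares this cookie against the pagemap's owner to decide whether
 * device-private pages may be used in place rather than migrated.
 *
 *	struct hmm_range range = {
 *		.notifier = &prange->notifier,
 *		.start = start,
 *		.end = end,
 *		.dev_private_owner = SVM_ADEV_PGMAP_OWNER(adev),
 *	};
 *	ret = hmm_range_fault(&range);
 */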
41 | |
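/**
 * struct svm_range_bo - VRAM buffer object backing one or more svm ranges
 *
 * @bo: the backing amdgpu buffer object in VRAM
 * @kref: reference count, dropped asynchronously via svm_range_bo_unref_async()
 * @range_list: all svm ranges sharing this bo
 * @list_lock: protects range_list
 * @eviction_fence: fence attached to the bo, signaled to trigger eviction
 * @eviction_work: worker that migrates the attached ranges to system memory
 * @evicting: non-zero while an eviction is pending
 * @release_work: worker that releases the bo outside the fence signaling path
 * @node: the kfd_node whose VRAM backs this bo
 */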
42 | struct svm_range_bo { |
43 | struct amdgpu_bo *bo; |
44 | struct kref kref; |
	struct list_head range_list; /* all svm ranges sharing this bo */
46 | spinlock_t list_lock; |
47 | struct amdgpu_amdkfd_fence *eviction_fence; |
48 | struct work_struct eviction_work; |
49 | uint32_t evicting; |
50 | struct work_struct release_work; |
51 | struct kfd_node *node; |
52 | }; |
53 | |
54 | enum svm_work_list_ops { |
55 | SVM_OP_NULL, |
56 | SVM_OP_UNMAP_RANGE, |
57 | SVM_OP_UPDATE_RANGE_NOTIFIER, |
58 | SVM_OP_UPDATE_RANGE_NOTIFIER_AND_MAP, |
59 | SVM_OP_ADD_RANGE, |
60 | SVM_OP_ADD_RANGE_AND_MAP |
61 | }; |
62 | |
63 | struct svm_work_list_item { |
64 | enum svm_work_list_ops op; |
65 | struct mm_struct *mm; |
66 | }; |
67 | |
68 | /** |
69 | * struct svm_range - shared virtual memory range |
70 | * |
 * @svms: list of svm ranges, structure defined in kfd_process
 * @migrate_mutex: to serialize range migration, validation and mapping update
 * @start: range start address in pages
 * @last: range last address in pages
 * @it_node: node [start, last] stored in the interval tree; start and last
 *           are page numbers, and the number of pages is (last - start + 1)
 * @list: linked list node, used to scan all ranges of svms
 * @update_list: linked list node, used to add the range to an update list
 * @npages: number of pages
 * @dma_addr: per-GPU DMA addresses of the system memory physical pages
 * @ttm_res: VRAM ttm resource
 * @offset: range start offset within the backing svm_bo, in pages
 * @svm_bo: struct to manage the split amdgpu_bo
 * @svm_bo_list: linked list node, used to scan all ranges sharing the same
 *               svm_bo
 * @lock: protect prange start, last, child_list, svm_bo_list
 * @saved_flags: saved PF_MEMALLOC flags, restored on unlock
 * @flags: flags defined as KFD_IOCTL_SVM_FLAG_*
 * @preferred_loc: preferred location, 0 for CPU, or GPU id
 * @prefetch_loc: last prefetch location, 0 for CPU, or GPU id
 * @actual_loc: the actual location, 0 for CPU, or GPU id
 * @granularity: migration granularity, log2 of the number of pages
 * @invalid: non-zero means the CPU page table is invalidated
 * @validate_timestamp: system timestamp when the range was validated
 * @notifier: mmu interval notifier registered for this range
 * @work_item: deferred work item information
 * @deferred_list: list node used to add the range to the deferred list
 * @child_list: list of split ranges which are not added to svms yet
 * @bitmap_access: index bitmap of GPUs which can access the range
 * @bitmap_aip: index bitmap of GPUs which can access the range in place
 * @mapped_to_gpu: flag to indicate whether the range is mapped on any GPU
 *
 * Data structure for a virtual memory range shared by the CPU and GPUs. The
 * backing pages can be allocated from system memory (RAM) or device memory
 * (VRAM), and migrated between the two in either direction.
105 | */ |
106 | struct svm_range { |
107 | struct svm_range_list *svms; |
108 | struct mutex migrate_mutex; |
109 | unsigned long start; |
110 | unsigned long last; |
111 | struct interval_tree_node it_node; |
112 | struct list_head list; |
113 | struct list_head update_list; |
114 | uint64_t npages; |
115 | dma_addr_t *dma_addr[MAX_GPU_INSTANCE]; |
116 | struct ttm_resource *ttm_res; |
117 | uint64_t offset; |
118 | struct svm_range_bo *svm_bo; |
119 | struct list_head svm_bo_list; |
120 | struct mutex lock; |
121 | unsigned int saved_flags; |
122 | uint32_t flags; |
123 | uint32_t preferred_loc; |
124 | uint32_t prefetch_loc; |
125 | uint32_t actual_loc; |
126 | uint8_t granularity; |
127 | atomic_t invalid; |
128 | ktime_t validate_timestamp; |
129 | struct mmu_interval_notifier notifier; |
130 | struct svm_work_list_item work_item; |
131 | struct list_head deferred_list; |
132 | struct list_head child_list; |
133 | DECLARE_BITMAP(bitmap_access, MAX_GPU_INSTANCE); |
134 | DECLARE_BITMAP(bitmap_aip, MAX_GPU_INSTANCE); |
135 | bool mapped_to_gpu; |
136 | }; |
137 | |
static inline void svm_range_lock(struct svm_range *prange)
{
	mutex_lock(&prange->lock);
	prange->saved_flags = memalloc_noreclaim_save();
}

static inline void svm_range_unlock(struct svm_range *prange)
{
	memalloc_noreclaim_restore(prange->saved_flags);
	mutex_unlock(&prange->lock);
}
149 | |
150 | static inline struct svm_range_bo *svm_range_bo_ref(struct svm_range_bo *svm_bo) |
151 | { |
152 | if (svm_bo) |
		kref_get(&svm_bo->kref);
154 | |
155 | return svm_bo; |
156 | } |
157 | |
158 | int svm_range_list_init(struct kfd_process *p); |
159 | void svm_range_list_fini(struct kfd_process *p); |
160 | int svm_ioctl(struct kfd_process *p, enum kfd_ioctl_svm_op op, uint64_t start, |
161 | uint64_t size, uint32_t nattrs, |
162 | struct kfd_ioctl_svm_attribute *attrs); |
163 | struct svm_range *svm_range_from_addr(struct svm_range_list *svms, |
164 | unsigned long addr, |
165 | struct svm_range **parent); |
166 | struct kfd_node *svm_range_get_node_by_id(struct svm_range *prange, |
167 | uint32_t gpu_id); |
168 | int svm_range_vram_node_new(struct kfd_node *node, struct svm_range *prange, |
169 | bool clear); |
170 | void svm_range_vram_node_free(struct svm_range *prange); |
171 | int svm_range_split_by_granularity(struct kfd_process *p, struct mm_struct *mm, |
172 | unsigned long addr, struct svm_range *parent, |
173 | struct svm_range *prange); |
174 | int svm_range_restore_pages(struct amdgpu_device *adev, unsigned int pasid, |
175 | uint32_t vmid, uint32_t node_id, uint64_t addr, |
176 | bool write_fault); |
177 | int svm_range_schedule_evict_svm_bo(struct amdgpu_amdkfd_fence *fence); |
178 | void svm_range_add_list_work(struct svm_range_list *svms, |
179 | struct svm_range *prange, struct mm_struct *mm, |
180 | enum svm_work_list_ops op); |
181 | void schedule_deferred_list_work(struct svm_range_list *svms); |
182 | void svm_range_dma_unmap_dev(struct device *dev, dma_addr_t *dma_addr, |
183 | unsigned long offset, unsigned long npages); |
184 | void svm_range_dma_unmap(struct svm_range *prange); |
185 | int svm_range_get_info(struct kfd_process *p, uint32_t *num_svm_ranges, |
186 | uint64_t *svm_priv_data_size); |
187 | int kfd_criu_checkpoint_svm(struct kfd_process *p, |
188 | uint8_t __user *user_priv_data, |
189 | uint64_t *priv_offset); |
190 | int kfd_criu_restore_svm(struct kfd_process *p, |
191 | uint8_t __user *user_priv_ptr, |
192 | uint64_t *priv_data_offset, |
193 | uint64_t max_priv_data_size); |
194 | int kfd_criu_resume_svm(struct kfd_process *p); |
195 | struct kfd_process_device * |
196 | svm_range_get_pdd_by_node(struct svm_range *prange, struct kfd_node *node); |
197 | void svm_range_list_lock_and_flush_work(struct svm_range_list *svms, struct mm_struct *mm); |
198 | |
/* The SVM API and HMM page migration work together: the device memory pgmap
 * type is set to a non-zero value when page migration registers the device
 * memory.
 */
202 | #define KFD_IS_SVM_API_SUPPORTED(adev) ((adev)->kfd.pgmap.type != 0 ||\ |
203 | (adev)->gmc.is_app_apu) |
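
/*
 * Example gate (illustrative): callers check this macro before exposing or
 * using any SVM functionality for a device, e.g.
 *
 *	if (!KFD_IS_SVM_API_SUPPORTED(node->adev))
 *		return -EINVAL;
 */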
204 | |
205 | void svm_range_bo_unref_async(struct svm_range_bo *svm_bo); |
206 | |
207 | void svm_range_set_max_pages(struct amdgpu_device *adev); |
208 | int svm_range_switch_xnack_reserve_mem(struct kfd_process *p, bool xnack_enabled); |
209 | |
210 | #else |
211 | |
212 | struct kfd_process; |
213 | |
214 | static inline int svm_range_list_init(struct kfd_process *p) |
215 | { |
216 | return 0; |
217 | } |
218 | static inline void svm_range_list_fini(struct kfd_process *p) |
219 | { |
220 | /* empty */ |
221 | } |
222 | |
static inline int svm_range_restore_pages(struct amdgpu_device *adev,
					  unsigned int pasid,
					  uint32_t vmid, uint32_t node_id,
					  uint64_t addr, bool write_fault)
227 | { |
228 | return -EFAULT; |
229 | } |
230 | |
231 | static inline int svm_range_schedule_evict_svm_bo( |
232 | struct amdgpu_amdkfd_fence *fence) |
233 | { |
	WARN_ONCE(1, "SVM eviction fence triggered, but SVM is disabled");
235 | return -EINVAL; |
236 | } |
237 | |
238 | static inline int svm_range_get_info(struct kfd_process *p, |
239 | uint32_t *num_svm_ranges, |
240 | uint64_t *svm_priv_data_size) |
241 | { |
242 | *num_svm_ranges = 0; |
243 | *svm_priv_data_size = 0; |
244 | return 0; |
245 | } |
246 | |
247 | static inline int kfd_criu_checkpoint_svm(struct kfd_process *p, |
248 | uint8_t __user *user_priv_data, |
249 | uint64_t *priv_offset) |
250 | { |
251 | return 0; |
252 | } |
253 | |
254 | static inline int kfd_criu_restore_svm(struct kfd_process *p, |
255 | uint8_t __user *user_priv_ptr, |
256 | uint64_t *priv_data_offset, |
257 | uint64_t max_priv_data_size) |
258 | { |
259 | return -EINVAL; |
260 | } |
261 | |
262 | static inline int kfd_criu_resume_svm(struct kfd_process *p) |
263 | { |
264 | return 0; |
265 | } |
266 | |
267 | static inline void svm_range_set_max_pages(struct amdgpu_device *adev) |
268 | { |
269 | } |
270 | |
271 | #define KFD_IS_SVM_API_SUPPORTED(dev) false |
272 | |
273 | #endif /* IS_ENABLED(CONFIG_HSA_AMD_SVM) */ |
274 | |
275 | #endif /* KFD_SVM_H_ */ |
276 | |