/*
 * Copyright 2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#include "amdgpu_drv.h"

#include <drm/drm_drv.h>
#include "../amdxcp/amdgpu_xcp_drv.h"

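/*
 * Run a single state transition (prepare_suspend, suspend, prepare_resume
 * or resume) on one IP block of a partition.  Blocks that are invalid or
 * that do not implement the requested callback are skipped and treated as
 * success.
 */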
static int __amdgpu_xcp_run(struct amdgpu_xcp_mgr *xcp_mgr,
			    struct amdgpu_xcp_ip *xcp_ip, int xcp_state)
{
	int (*run_func)(void *handle, uint32_t inst_mask);
	int ret = 0;

	if (!xcp_ip || !xcp_ip->valid || !xcp_ip->ip_funcs)
		return 0;

	run_func = NULL;

	switch (xcp_state) {
	case AMDGPU_XCP_PREPARE_SUSPEND:
		run_func = xcp_ip->ip_funcs->prepare_suspend;
		break;
	case AMDGPU_XCP_SUSPEND:
		run_func = xcp_ip->ip_funcs->suspend;
		break;
	case AMDGPU_XCP_PREPARE_RESUME:
		run_func = xcp_ip->ip_funcs->prepare_resume;
		break;
	case AMDGPU_XCP_RESUME:
		run_func = xcp_ip->ip_funcs->resume;
		break;
	}

	if (run_func)
		ret = run_func(xcp_mgr->adev, xcp_ip->inst_mask);

	return ret;
}

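/*
 * Walk all IP blocks of partition @xcp_id and run the requested state
 * transition on each, stopping at the first failure.
 */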
static int amdgpu_xcp_run_transition(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				     int state)
{
	struct amdgpu_xcp_ip *xcp_ip;
	struct amdgpu_xcp *xcp;
	int i, ret;

	if (xcp_id >= MAX_XCP || !xcp_mgr->xcp[xcp_id].valid)
		return -EINVAL;

	xcp = &xcp_mgr->xcp[xcp_id];
	for (i = 0; i < AMDGPU_XCP_MAX_BLOCKS; ++i) {
		xcp_ip = &xcp->ip[i];
		ret = __amdgpu_xcp_run(xcp_mgr, xcp_ip, state);
		if (ret)
			break;
	}

	return ret;
}

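/* Per-partition entry points for each of the four transition states. */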
int amdgpu_xcp_prepare_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_SUSPEND);
}

int amdgpu_xcp_suspend(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_SUSPEND);
}

int amdgpu_xcp_prepare_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id,
					 AMDGPU_XCP_PREPARE_RESUME);
}

int amdgpu_xcp_resume(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id)
{
	return amdgpu_xcp_run_transition(xcp_mgr, xcp_id, AMDGPU_XCP_RESUME);
}

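/* Record one IP block's details in a partition and mark both as valid. */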
static void __amdgpu_xcp_add_block(struct amdgpu_xcp_mgr *xcp_mgr, int xcp_id,
				   struct amdgpu_xcp_ip *ip)
{
	struct amdgpu_xcp *xcp;

	if (!ip)
		return;

	xcp = &xcp_mgr->xcp[xcp_id];
	xcp->ip[ip->ip_id] = *ip;
	xcp->ip[ip->ip_id].valid = true;

	xcp->valid = true;
}

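/**
 * amdgpu_xcp_init - Build the per-partition IP and memory-id tables
 * @xcp_mgr: partition manager
 * @num_xcps: number of partitions in the chosen mode
 * @mode: partition mode being configured
 *
 * Queries the IP details and memory id of every partition through the
 * mode-specific callbacks and refreshes the partition scheduler lists.
 */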
int amdgpu_xcp_init(struct amdgpu_xcp_mgr *xcp_mgr, int num_xcps, int mode)
{
	struct amdgpu_device *adev = xcp_mgr->adev;
	struct amdgpu_xcp_ip ip;
	uint8_t mem_id;
	int i, j, ret;

	if (!num_xcps || num_xcps > MAX_XCP)
		return -EINVAL;

	xcp_mgr->mode = mode;

	for (i = 0; i < MAX_XCP; ++i)
		xcp_mgr->xcp[i].valid = false;

	/* This is needed for figuring out the memory id of an xcp */
	xcp_mgr->num_xcp_per_mem_partition = num_xcps / adev->gmc.num_mem_partitions;

	for (i = 0; i < num_xcps; ++i) {
		for (j = AMDGPU_XCP_GFXHUB; j < AMDGPU_XCP_MAX_BLOCKS; ++j) {
			ret = xcp_mgr->funcs->get_ip_details(xcp_mgr, i, j,
							     &ip);
			if (ret)
				continue;

			__amdgpu_xcp_add_block(xcp_mgr, i, &ip);
		}

		xcp_mgr->xcp[i].id = i;

		if (xcp_mgr->funcs->get_xcp_mem_id) {
			ret = xcp_mgr->funcs->get_xcp_mem_id(
				xcp_mgr, &xcp_mgr->xcp[i], &mem_id);
			if (ret)
				continue;
			else
				xcp_mgr->xcp[i].mem_id = mem_id;
		}
	}

	xcp_mgr->num_xcps = num_xcps;
	amdgpu_xcp_update_partition_sched_list(adev);

	return 0;
}

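/*
 * Perform the actual mode switch under xcp_lock.  The cached mode is set
 * to AMDGPU_XCP_MODE_TRANS while the switch is in flight; on failure the
 * real mode is re-queried from the device (or the previous cached value
 * is restored when no query callback exists).
 */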
static int __amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr,
					      int mode)
{
	int ret, curr_mode, num_xcps = 0;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->switch_partition_mode)
		return 0;

	mutex_lock(&xcp_mgr->xcp_lock);

	curr_mode = xcp_mgr->mode;
	/* State set to transient mode */
	xcp_mgr->mode = AMDGPU_XCP_MODE_TRANS;

	ret = xcp_mgr->funcs->switch_partition_mode(xcp_mgr, mode, &num_xcps);

	if (ret) {
		/* Failed, get whatever mode it's at now */
		if (xcp_mgr->funcs->query_partition_mode)
			xcp_mgr->mode = amdgpu_xcp_query_partition_mode(
				xcp_mgr, AMDGPU_XCP_FL_LOCKED);
		else
			xcp_mgr->mode = curr_mode;

		goto out;
	}

	/* Rebuild partition data for the new mode */
	amdgpu_xcp_init(xcp_mgr, num_xcps, mode);

out:
	mutex_unlock(&xcp_mgr->xcp_lock);

	return ret;
}

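/* Switch to @mode, doing nothing if the manager is already in that mode. */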
int amdgpu_xcp_switch_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, int mode)
{
	if (!xcp_mgr || mode == AMDGPU_XCP_MODE_NONE)
		return -EINVAL;

	if (xcp_mgr->mode == mode)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, mode);
}

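/* Re-program the device with the cached partition mode. */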
int amdgpu_xcp_restore_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr)
{
	if (!xcp_mgr || xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return 0;

	return __amdgpu_xcp_switch_partition_mode(xcp_mgr, xcp_mgr->mode);
}

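/**
 * amdgpu_xcp_query_partition_mode - Query the current partition mode
 * @xcp_mgr: partition manager
 * @flags: pass AMDGPU_XCP_FL_LOCKED if the caller already holds xcp_lock
 *
 * Returns the mode reported by the device and warns when it disagrees
 * with the cached mode (unless a transition is in progress).
 */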
int amdgpu_xcp_query_partition_mode(struct amdgpu_xcp_mgr *xcp_mgr, u32 flags)
{
	int mode;

	if (xcp_mgr->mode == AMDGPU_XCP_MODE_NONE)
		return xcp_mgr->mode;

	if (!xcp_mgr->funcs || !xcp_mgr->funcs->query_partition_mode)
		return xcp_mgr->mode;

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_lock(&xcp_mgr->xcp_lock);
	mode = xcp_mgr->funcs->query_partition_mode(xcp_mgr);
	if (xcp_mgr->mode != AMDGPU_XCP_MODE_TRANS && mode != xcp_mgr->mode)
		dev_WARN(xcp_mgr->adev->dev,
			 "Cached partition mode %d does not match device mode %d",
			 xcp_mgr->mode, mode);

	if (!(flags & AMDGPU_XCP_FL_LOCKED))
		mutex_unlock(&xcp_mgr->xcp_lock);

	return mode;
}

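/*
 * Allocate a drm device node for every partition beyond the first.
 * Partition 0 reuses the primary drm device; the remaining partitions
 * save the original minor/driver pointers and then redirect them to the
 * primary device so that all IOCTLs funnel through it.
 */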
static int amdgpu_xcp_dev_alloc(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	struct drm_device *ddev;
	int i, ret;

	ddev = adev_to_drm(adev);

	/* xcp #0 shares the drm device setting with adev */
	adev->xcp_mgr->xcp->ddev = ddev;

	for (i = 1; i < MAX_XCP; i++) {
		ret = amdgpu_xcp_drm_dev_alloc(&p_ddev);
		if (ret == -ENOSPC) {
			dev_warn(adev->dev,
				 "Skip xcp node #%d when out of drm node resource.", i);
			return 0;
		} else if (ret) {
			return ret;
		}

		/* Redirect all IOCTLs to the primary device */
		adev->xcp_mgr->xcp[i].rdev = p_ddev->render->dev;
		adev->xcp_mgr->xcp[i].pdev = p_ddev->primary->dev;
		adev->xcp_mgr->xcp[i].driver = (struct drm_driver *)p_ddev->driver;
		adev->xcp_mgr->xcp[i].vma_offset_manager = p_ddev->vma_offset_manager;
		p_ddev->render->dev = ddev;
		p_ddev->primary->dev = ddev;
		p_ddev->vma_offset_manager = ddev->vma_offset_manager;
		p_ddev->driver = &amdgpu_partition_driver;
		adev->xcp_mgr->xcp[i].ddev = p_ddev;
	}

	return 0;
}

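/**
 * amdgpu_xcp_mgr_init - Allocate and initialize the partition manager
 * @adev: amdgpu device
 * @init_mode: initial partition mode
 * @init_num_xcps: number of partitions in the initial mode
 * @xcp_funcs: mode-specific callbacks; switch_partition_mode and
 *             get_ip_details are mandatory
 */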
int amdgpu_xcp_mgr_init(struct amdgpu_device *adev, int init_mode,
			int init_num_xcps,
			struct amdgpu_xcp_mgr_funcs *xcp_funcs)
{
	struct amdgpu_xcp_mgr *xcp_mgr;

	if (!xcp_funcs || !xcp_funcs->switch_partition_mode ||
	    !xcp_funcs->get_ip_details)
		return -EINVAL;

	xcp_mgr = kzalloc(sizeof(*xcp_mgr), GFP_KERNEL);
	if (!xcp_mgr)
		return -ENOMEM;

	xcp_mgr->adev = adev;
	xcp_mgr->funcs = xcp_funcs;
	xcp_mgr->mode = init_mode;
	mutex_init(&xcp_mgr->xcp_lock);

	if (init_mode != AMDGPU_XCP_MODE_NONE)
		amdgpu_xcp_init(xcp_mgr, init_num_xcps, init_mode);

	adev->xcp_mgr = xcp_mgr;

	return amdgpu_xcp_dev_alloc(adev);
}

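/*
 * Return a bitmask of the partitions that own @instance of IP block @ip,
 * or -ENXIO when no partition matches.
 */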
int amdgpu_xcp_get_partition(struct amdgpu_xcp_mgr *xcp_mgr,
			     enum AMDGPU_XCP_IP_BLOCK ip, int instance)
{
	struct amdgpu_xcp *xcp;
	int i, id_mask = 0;

	if (ip >= AMDGPU_XCP_MAX_BLOCKS)
		return -EINVAL;

	for (i = 0; i < xcp_mgr->num_xcps; ++i) {
		xcp = &xcp_mgr->xcp[i];
		if ((xcp->valid) && (xcp->ip[ip].valid) &&
		    (xcp->ip[ip].inst_mask & BIT(instance)))
			id_mask |= BIT(i);
	}

	if (!id_mask)
		id_mask = -ENXIO;

	return id_mask;
}

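/* Fetch the instance mask of IP block @ip within partition @xcp. */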
int amdgpu_xcp_get_inst_details(struct amdgpu_xcp *xcp,
				enum AMDGPU_XCP_IP_BLOCK ip,
				uint32_t *inst_mask)
{
	if (!xcp->valid || !inst_mask || !(xcp->ip[ip].valid))
		return -EINVAL;

	*inst_mask = xcp->ip[ip].inst_mask;

	return 0;
}

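/* Register the extra per-partition drm devices allocated at init time. */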
int amdgpu_xcp_dev_register(struct amdgpu_device *adev,
			    const struct pci_device_id *ent)
{
	int i, ret;

	if (!adev->xcp_mgr)
		return 0;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		ret = drm_dev_register(adev->xcp_mgr->xcp[i].ddev, ent->driver_data);
		if (ret)
			return ret;
	}

	return 0;
}

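/*
 * Unplug the per-partition drm devices and restore the minor/driver
 * pointers that amdgpu_xcp_dev_alloc() redirected to the primary device.
 */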
void amdgpu_xcp_dev_unplug(struct amdgpu_device *adev)
{
	struct drm_device *p_ddev;
	int i;

	if (!adev->xcp_mgr)
		return;

	for (i = 1; i < MAX_XCP; i++) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		p_ddev = adev->xcp_mgr->xcp[i].ddev;
		drm_dev_unplug(p_ddev);
		p_ddev->render->dev = adev->xcp_mgr->xcp[i].rdev;
		p_ddev->primary->dev = adev->xcp_mgr->xcp[i].pdev;
		p_ddev->driver = adev->xcp_mgr->xcp[i].driver;
		p_ddev->vma_offset_manager = adev->xcp_mgr->xcp[i].vma_offset_manager;
	}
}

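/*
 * Resolve which partition a render node open() maps to and record the
 * partition and memory ids in the file private data.
 */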
int amdgpu_xcp_open_device(struct amdgpu_device *adev,
			   struct amdgpu_fpriv *fpriv,
			   struct drm_file *file_priv)
{
	int i;

	if (!adev->xcp_mgr)
		return 0;

	fpriv->xcp_id = AMDGPU_XCP_NO_PARTITION;
	for (i = 0; i < MAX_XCP; ++i) {
		if (!adev->xcp_mgr->xcp[i].ddev)
			break;

		if (file_priv->minor == adev->xcp_mgr->xcp[i].ddev->render) {
			if (!adev->xcp_mgr->xcp[i].valid) {
				dev_err(adev->dev, "renderD%d partition %d not valid!",
					file_priv->minor->index, i);
				return -ENOENT;
			}
			dev_dbg(adev->dev, "renderD%d partition %d opened!",
				file_priv->minor->index, i);
			fpriv->xcp_id = i;
			break;
		}
	}

	fpriv->vm.mem_id = fpriv->xcp_id == AMDGPU_XCP_NO_PARTITION ? -1 :
				adev->xcp_mgr->xcp[fpriv->xcp_id].mem_id;
	return 0;
}

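/* Drop the reference on the partition that owns the entity's ring. */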
void amdgpu_xcp_release_sched(struct amdgpu_device *adev,
			      struct amdgpu_ctx_entity *entity)
{
	struct drm_gpu_scheduler *sched;
	struct amdgpu_ring *ring;

	if (!adev->xcp_mgr)
		return;

	sched = entity->entity.rq->sched;
	if (sched->ready) {
		ring = to_amdgpu_ring(entity->entity.rq->sched);
		atomic_dec(&adev->xcp_mgr->xcp[ring->xcp_id].ref_cnt);
	}
}