1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
2 | /* |
3 | * Copyright 2014-2022 Advanced Micro Devices, Inc. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | */ |
24 | |
25 | #include <linux/printk.h> |
26 | #include <linux/slab.h> |
27 | #include <linux/mm_types.h> |
28 | |
29 | #include "kfd_priv.h" |
30 | #include "kfd_mqd_manager.h" |
31 | #include "vi_structs.h" |
32 | #include "gca/gfx_8_0_sh_mask.h" |
33 | #include "gca/gfx_8_0_enum.h" |
34 | #include "oss/oss_3_0_sh_mask.h" |
35 | |
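/* PRIV_STATE is not defined in gca/gfx_8_0_sh_mask.h, so define its shift locally. */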
36 | #define CP_MQD_CONTROL__PRIV_STATE__SHIFT 0x8 |
37 | |
38 | static inline struct vi_mqd *get_mqd(void *mqd) |
39 | { |
40 | return (struct vi_mqd *)mqd; |
41 | } |
42 | |
43 | static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd) |
44 | { |
45 | return (struct vi_sdma_mqd *)mqd; |
46 | } |
47 | |
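/*
 * Program the per-shader-engine CU masks from a CU-mask update request.
 * The user-provided mask is distributed symmetrically across shader
 * engines before being written to the static thread management registers.
 */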
48 | static void update_cu_mask(struct mqd_manager *mm, void *mqd, |
49 | struct mqd_update_info *minfo) |
50 | { |
51 | struct vi_mqd *m; |
52 | uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */ |
53 | |
54 | if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) || |
55 | !minfo->cu_mask.ptr) |
56 | return; |
57 | |
	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);
60 | |
61 | m = get_mqd(mqd); |
62 | m->compute_static_thread_mgmt_se0 = se_mask[0]; |
63 | m->compute_static_thread_mgmt_se1 = se_mask[1]; |
64 | m->compute_static_thread_mgmt_se2 = se_mask[2]; |
65 | m->compute_static_thread_mgmt_se3 = se_mask[3]; |
66 | |
67 | pr_debug("Update cu mask to %#x %#x %#x %#x\n" , |
68 | m->compute_static_thread_mgmt_se0, |
69 | m->compute_static_thread_mgmt_se1, |
70 | m->compute_static_thread_mgmt_se2, |
71 | m->compute_static_thread_mgmt_se3); |
72 | } |
73 | |
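/* Map the queue priority onto the HQD pipe and queue priority fields. */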
74 | static void set_priority(struct vi_mqd *m, struct queue_properties *q) |
75 | { |
76 | m->cp_hqd_pipe_priority = pipe_priority_map[q->priority]; |
77 | m->cp_hqd_queue_priority = q->priority; |
78 | } |
79 | |
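/* Allocate MQD backing memory from the device's GTT sub-allocator. */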
80 | static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd, |
81 | struct queue_properties *q) |
82 | { |
83 | struct kfd_mem_obj *mqd_mem_obj; |
84 | |
85 | if (kfd_gtt_sa_allocate(kfd, sizeof(struct vi_mqd), |
86 | &mqd_mem_obj)) |
87 | return NULL; |
88 | |
89 | return mqd_mem_obj; |
90 | } |
91 | |
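/*
 * Initialize a compute MQD: clear it, enable all CUs by default, program
 * the MQD/quantum/priority control registers, and set up the trap handler
 * (TBA/TMA) and CWSR context save state when the queue provides them.
 */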
92 | static void init_mqd(struct mqd_manager *mm, void **mqd, |
93 | struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, |
94 | struct queue_properties *q) |
95 | { |
96 | uint64_t addr; |
97 | struct vi_mqd *m; |
98 | |
99 | m = (struct vi_mqd *) mqd_mem_obj->cpu_ptr; |
100 | addr = mqd_mem_obj->gpu_addr; |
101 | |
102 | memset(m, 0, sizeof(struct vi_mqd)); |
103 | |
104 | m->header = 0xC0310800; |
105 | m->compute_pipelinestat_enable = 1; |
106 | m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF; |
107 | m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF; |
108 | m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF; |
109 | m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF; |
110 | |
111 | m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK | |
112 | 0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT; |
113 | |
114 | m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT | |
115 | MTYPE_UC << CP_MQD_CONTROL__MTYPE__SHIFT; |
116 | |
117 | m->cp_mqd_base_addr_lo = lower_32_bits(addr); |
118 | m->cp_mqd_base_addr_hi = upper_32_bits(addr); |
119 | |
120 | m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT | |
121 | 1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT | |
122 | 1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT; |
123 | |
124 | set_priority(m, q); |
125 | m->cp_hqd_eop_rptr = 1 << CP_HQD_EOP_RPTR__INIT_FETCHER__SHIFT; |
126 | |
127 | if (q->format == KFD_QUEUE_FORMAT_AQL) |
128 | m->cp_hqd_iq_rptr = 1; |
129 | |
130 | if (q->tba_addr) { |
131 | m->compute_tba_lo = lower_32_bits(q->tba_addr >> 8); |
132 | m->compute_tba_hi = upper_32_bits(q->tba_addr >> 8); |
133 | m->compute_tma_lo = lower_32_bits(q->tma_addr >> 8); |
134 | m->compute_tma_hi = upper_32_bits(q->tma_addr >> 8); |
135 | m->compute_pgm_rsrc2 |= |
136 | (1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT); |
137 | } |
138 | |
139 | if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) { |
140 | m->cp_hqd_persistent_state |= |
141 | (1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT); |
142 | m->cp_hqd_ctx_save_base_addr_lo = |
143 | lower_32_bits(q->ctx_save_restore_area_address); |
144 | m->cp_hqd_ctx_save_base_addr_hi = |
145 | upper_32_bits(q->ctx_save_restore_area_address); |
146 | m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size; |
147 | m->cp_hqd_cntl_stack_size = q->ctl_stack_size; |
148 | m->cp_hqd_cntl_stack_offset = q->ctl_stack_size; |
149 | m->cp_hqd_wg_state_offset = q->ctl_stack_size; |
150 | } |
151 | |
152 | *mqd = m; |
153 | if (gart_addr) |
154 | *gart_addr = addr; |
155 | mm->update_mqd(mm, m, q, NULL); |
156 | } |
157 | |
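/*
 * Load the MQD into a hardware queue descriptor (HQD) slot on the given
 * pipe/queue via the kfd2kgd interface.
 */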
158 | static int load_mqd(struct mqd_manager *mm, void *mqd, |
159 | uint32_t pipe_id, uint32_t queue_id, |
160 | struct queue_properties *p, struct mm_struct *mms) |
161 | { |
162 | /* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */ |
163 | uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0); |
164 | uint32_t wptr_mask = (uint32_t)((p->queue_size / 4) - 1); |
165 | |
166 | return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id, |
167 | (uint32_t __user *)p->write_ptr, |
168 | wptr_shift, wptr_mask, mms); |
169 | } |
170 | |
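/*
 * Common MQD update path. The caller selects the cache memory type (mtype)
 * and whether ATC (IOMMU-translated) addressing is used: MTYPE_CC with ATC
 * for the default VI/APU path, MTYPE_UC without ATC for the Tonga path.
 */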
171 | static void __update_mqd(struct mqd_manager *mm, void *mqd, |
172 | struct queue_properties *q, struct mqd_update_info *minfo, |
173 | unsigned int mtype, unsigned int atc_bit) |
174 | { |
175 | struct vi_mqd *m; |
176 | |
177 | m = get_mqd(mqd); |
178 | |
179 | m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT | |
180 | atc_bit << CP_HQD_PQ_CONTROL__PQ_ATC__SHIFT | |
181 | mtype << CP_HQD_PQ_CONTROL__MTYPE__SHIFT; |
182 | m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1; |
183 | pr_debug("cp_hqd_pq_control 0x%x\n" , m->cp_hqd_pq_control); |
184 | |
185 | m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8); |
186 | m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8); |
187 | |
188 | m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr); |
189 | m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr); |
190 | m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr); |
191 | m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr); |
192 | |
193 | m->cp_hqd_pq_doorbell_control = |
194 | q->doorbell_off << |
195 | CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; |
196 | pr_debug("cp_hqd_pq_doorbell_control 0x%x\n" , |
197 | m->cp_hqd_pq_doorbell_control); |
198 | |
199 | m->cp_hqd_eop_control = atc_bit << CP_HQD_EOP_CONTROL__EOP_ATC__SHIFT | |
200 | mtype << CP_HQD_EOP_CONTROL__MTYPE__SHIFT; |
201 | |
202 | m->cp_hqd_ib_control = atc_bit << CP_HQD_IB_CONTROL__IB_ATC__SHIFT | |
203 | 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT | |
204 | mtype << CP_HQD_IB_CONTROL__MTYPE__SHIFT; |
205 | |
206 | /* |
207 | * HW does not clamp this field correctly. Maximum EOP queue size |
208 | * is constrained by per-SE EOP done signal count, which is 8-bit. |
209 | * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit |
210 | * more than (EOP entry count - 1) so a queue size of 0x800 dwords |
211 | * is safe, giving a maximum field value of 0xA. |
212 | */ |
213 | m->cp_hqd_eop_control |= min(0xA, |
214 | order_base_2(q->eop_ring_buffer_size / 4) - 1); |
215 | m->cp_hqd_eop_base_addr_lo = |
216 | lower_32_bits(q->eop_ring_buffer_address >> 8); |
217 | m->cp_hqd_eop_base_addr_hi = |
218 | upper_32_bits(q->eop_ring_buffer_address >> 8); |
219 | |
220 | m->cp_hqd_iq_timer = atc_bit << CP_HQD_IQ_TIMER__IQ_ATC__SHIFT | |
221 | mtype << CP_HQD_IQ_TIMER__MTYPE__SHIFT; |
222 | |
223 | m->cp_hqd_vmid = q->vmid; |
224 | |
225 | if (q->format == KFD_QUEUE_FORMAT_AQL) { |
226 | m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK | |
227 | 2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT; |
228 | } |
229 | |
230 | if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) |
231 | m->cp_hqd_ctx_save_control = |
232 | atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT | |
233 | mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT; |
234 | |
235 | update_cu_mask(mm, mqd, minfo); |
236 | set_priority(m, q); |
237 | |
238 | q->is_active = QUEUE_IS_ACTIVE(*q); |
239 | } |
240 | |
242 | static void update_mqd(struct mqd_manager *mm, void *mqd, |
243 | struct queue_properties *q, |
244 | struct mqd_update_info *minfo) |
245 | { |
246 | __update_mqd(mm, mqd, q, minfo, MTYPE_CC, 1); |
247 | } |
248 | |
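/* Read back the doorbell ID that firmware recorded in the MQD. */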
249 | static uint32_t read_doorbell_id(void *mqd) |
250 | { |
251 | struct vi_mqd *m = (struct vi_mqd *)mqd; |
252 | |
253 | return m->queue_doorbell_id0; |
254 | } |
255 | |
256 | static void update_mqd_tonga(struct mqd_manager *mm, void *mqd, |
257 | struct queue_properties *q, |
258 | struct mqd_update_info *minfo) |
259 | { |
260 | __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); |
261 | } |
262 | |
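/*
 * Report control stack and save area usage from the saved MQD fields.
 * GFXv8 does not copy the control stack out; see the comment below.
 */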
263 | static int get_wave_state(struct mqd_manager *mm, void *mqd, |
264 | void __user *ctl_stack, |
265 | u32 *ctl_stack_used_size, |
266 | u32 *save_area_used_size) |
267 | { |
268 | struct vi_mqd *m; |
269 | |
270 | m = get_mqd(mqd); |
271 | |
272 | *ctl_stack_used_size = m->cp_hqd_cntl_stack_size - |
273 | m->cp_hqd_cntl_stack_offset; |
274 | *save_area_used_size = m->cp_hqd_wg_state_offset - |
275 | m->cp_hqd_cntl_stack_size; |
276 | |
277 | /* Control stack is not copied to user mode for GFXv8 because |
278 | * it's part of the context save area that is already |
279 | * accessible to user mode |
280 | */ |
281 | |
282 | return 0; |
283 | } |
284 | |
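/*
 * Checkpoint support: the MQD is copied raw, and the control stack lives
 * in the user-mode context save area, so no control stack is stored here.
 */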
285 | static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size) |
286 | { |
287 | /* Control stack is stored in user mode */ |
288 | *ctl_stack_size = 0; |
289 | } |
290 | |
291 | static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst) |
292 | { |
293 | struct vi_mqd *m; |
294 | |
295 | m = get_mqd(mqd); |
296 | |
297 | memcpy(mqd_dst, m, sizeof(struct vi_mqd)); |
298 | } |
299 | |
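/*
 * Restore a checkpointed MQD into fresh backing memory and re-point its
 * doorbell; the queue stays inactive until it is activated later.
 */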
300 | static void restore_mqd(struct mqd_manager *mm, void **mqd, |
301 | struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, |
302 | struct queue_properties *qp, |
303 | const void *mqd_src, |
304 | const void *ctl_stack_src, const u32 ctl_stack_size) |
305 | { |
306 | uint64_t addr; |
307 | struct vi_mqd *m; |
308 | |
309 | m = (struct vi_mqd *) mqd_mem_obj->cpu_ptr; |
310 | addr = mqd_mem_obj->gpu_addr; |
311 | |
312 | memcpy(m, mqd_src, sizeof(*m)); |
313 | |
314 | *mqd = m; |
315 | if (gart_addr) |
316 | *gart_addr = addr; |
317 | |
318 | m->cp_hqd_pq_doorbell_control = |
319 | qp->doorbell_off << |
320 | CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT; |
321 | pr_debug("cp_hqd_pq_doorbell_control 0x%x\n" , |
322 | m->cp_hqd_pq_doorbell_control); |
323 | |
324 | qp->is_active = 0; |
325 | } |
326 | |
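/*
 * The HIQ is a kernel-mode queue: mark its PQ as privileged and KMD-owned
 * on top of the common compute MQD initialization.
 */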
327 | static void init_mqd_hiq(struct mqd_manager *mm, void **mqd, |
328 | struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, |
329 | struct queue_properties *q) |
330 | { |
331 | struct vi_mqd *m; |
332 | |
333 | init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q); |
334 | |
	m = get_mqd(*mqd);
336 | |
337 | m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT | |
338 | 1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT; |
339 | } |
340 | |
341 | static void update_mqd_hiq(struct mqd_manager *mm, void *mqd, |
342 | struct queue_properties *q, |
343 | struct mqd_update_info *minfo) |
344 | { |
345 | __update_mqd(mm, mqd, q, minfo, MTYPE_UC, 0); |
346 | } |
347 | |
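/* Initialize an SDMA MQD; register programming is deferred to update_mqd. */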
348 | static void init_mqd_sdma(struct mqd_manager *mm, void **mqd, |
349 | struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, |
350 | struct queue_properties *q) |
351 | { |
352 | struct vi_sdma_mqd *m; |
353 | |
354 | m = (struct vi_sdma_mqd *) mqd_mem_obj->cpu_ptr; |
355 | |
356 | memset(m, 0, sizeof(struct vi_sdma_mqd)); |
357 | |
358 | *mqd = m; |
359 | if (gart_addr) |
360 | *gart_addr = mqd_mem_obj->gpu_addr; |
361 | |
362 | mm->update_mqd(mm, m, q, NULL); |
363 | } |
364 | |
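/*
 * Program the SDMA RLC ring buffer registers: size, VMID, rptr writeback,
 * ring base, and doorbell offset.
 */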
365 | static void update_mqd_sdma(struct mqd_manager *mm, void *mqd, |
366 | struct queue_properties *q, |
367 | struct mqd_update_info *minfo) |
368 | { |
369 | struct vi_sdma_mqd *m; |
370 | |
371 | m = get_sdma_mqd(mqd); |
372 | m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4) |
373 | << SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT | |
374 | q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT | |
375 | 1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT | |
376 | 6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT; |
377 | |
378 | m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8); |
379 | m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8); |
380 | m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr); |
381 | m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr); |
382 | m->sdmax_rlcx_doorbell = |
383 | q->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT; |
384 | |
385 | m->sdmax_rlcx_virtual_addr = q->sdma_vm_addr; |
386 | |
387 | m->sdma_engine_id = q->sdma_engine_id; |
388 | m->sdma_queue_id = q->sdma_queue_id; |
389 | |
390 | q->is_active = QUEUE_IS_ACTIVE(*q); |
391 | } |
392 | |
393 | static void checkpoint_mqd_sdma(struct mqd_manager *mm, |
394 | void *mqd, |
395 | void *mqd_dst, |
396 | void *ctl_stack_dst) |
397 | { |
398 | struct vi_sdma_mqd *m; |
399 | |
400 | m = get_sdma_mqd(mqd); |
401 | |
402 | memcpy(mqd_dst, m, sizeof(struct vi_sdma_mqd)); |
403 | } |
404 | |
405 | static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd, |
406 | struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr, |
407 | struct queue_properties *qp, |
408 | const void *mqd_src, |
409 | const void *ctl_stack_src, const u32 ctl_stack_size) |
410 | { |
411 | uint64_t addr; |
412 | struct vi_sdma_mqd *m; |
413 | |
414 | m = (struct vi_sdma_mqd *) mqd_mem_obj->cpu_ptr; |
415 | addr = mqd_mem_obj->gpu_addr; |
416 | |
417 | memcpy(m, mqd_src, sizeof(*m)); |
418 | |
419 | m->sdmax_rlcx_doorbell = |
420 | qp->doorbell_off << SDMA0_RLC0_DOORBELL__OFFSET__SHIFT; |
421 | |
422 | *mqd = m; |
423 | if (gart_addr) |
424 | *gart_addr = addr; |
425 | |
426 | qp->is_active = 0; |
427 | } |
428 | |
429 | #if defined(CONFIG_DEBUG_FS) |
430 | |
432 | static int debugfs_show_mqd(struct seq_file *m, void *data) |
433 | { |
434 | seq_hex_dump(m, " " , DUMP_PREFIX_OFFSET, 32, 4, |
435 | data, sizeof(struct vi_mqd), false); |
436 | return 0; |
437 | } |
438 | |
439 | static int debugfs_show_mqd_sdma(struct seq_file *m, void *data) |
440 | { |
441 | seq_hex_dump(m, " " , DUMP_PREFIX_OFFSET, 32, 4, |
442 | data, sizeof(struct vi_sdma_mqd), false); |
443 | return 0; |
444 | } |
445 | |
446 | #endif |
447 | |
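/*
 * Create an MQD manager for one queue type (CP, HIQ, DIQ or SDMA) on a
 * GFXv8 device, wiring up the type-specific allocate/init/load/update/
 * destroy callbacks.
 */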
448 | struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type, |
449 | struct kfd_dev *dev) |
450 | { |
451 | struct mqd_manager *mqd; |
452 | |
453 | if (WARN_ON(type >= KFD_MQD_TYPE_MAX)) |
454 | return NULL; |
455 | |
	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
457 | if (!mqd) |
458 | return NULL; |
459 | |
460 | mqd->dev = dev; |
461 | |
462 | switch (type) { |
463 | case KFD_MQD_TYPE_CP: |
464 | mqd->allocate_mqd = allocate_mqd; |
465 | mqd->init_mqd = init_mqd; |
466 | mqd->free_mqd = kfd_free_mqd_cp; |
467 | mqd->load_mqd = load_mqd; |
468 | mqd->update_mqd = update_mqd; |
469 | mqd->destroy_mqd = kfd_destroy_mqd_cp; |
470 | mqd->is_occupied = kfd_is_occupied_cp; |
471 | mqd->get_wave_state = get_wave_state; |
472 | mqd->get_checkpoint_info = get_checkpoint_info; |
473 | mqd->checkpoint_mqd = checkpoint_mqd; |
474 | mqd->restore_mqd = restore_mqd; |
475 | mqd->mqd_size = sizeof(struct vi_mqd); |
476 | #if defined(CONFIG_DEBUG_FS) |
477 | mqd->debugfs_show_mqd = debugfs_show_mqd; |
478 | #endif |
479 | break; |
480 | case KFD_MQD_TYPE_HIQ: |
481 | mqd->allocate_mqd = allocate_hiq_mqd; |
482 | mqd->init_mqd = init_mqd_hiq; |
483 | mqd->free_mqd = free_mqd_hiq_sdma; |
484 | mqd->load_mqd = load_mqd; |
485 | mqd->update_mqd = update_mqd_hiq; |
486 | mqd->destroy_mqd = kfd_destroy_mqd_cp; |
487 | mqd->is_occupied = kfd_is_occupied_cp; |
488 | mqd->mqd_size = sizeof(struct vi_mqd); |
489 | #if defined(CONFIG_DEBUG_FS) |
490 | mqd->debugfs_show_mqd = debugfs_show_mqd; |
491 | #endif |
492 | mqd->read_doorbell_id = read_doorbell_id; |
493 | break; |
494 | case KFD_MQD_TYPE_DIQ: |
495 | mqd->allocate_mqd = allocate_mqd; |
496 | mqd->init_mqd = init_mqd_hiq; |
497 | mqd->free_mqd = kfd_free_mqd_cp; |
498 | mqd->load_mqd = load_mqd; |
499 | mqd->update_mqd = update_mqd_hiq; |
500 | mqd->destroy_mqd = kfd_destroy_mqd_cp; |
501 | mqd->is_occupied = kfd_is_occupied_cp; |
502 | mqd->mqd_size = sizeof(struct vi_mqd); |
503 | #if defined(CONFIG_DEBUG_FS) |
504 | mqd->debugfs_show_mqd = debugfs_show_mqd; |
505 | #endif |
506 | break; |
507 | case KFD_MQD_TYPE_SDMA: |
508 | mqd->allocate_mqd = allocate_sdma_mqd; |
509 | mqd->init_mqd = init_mqd_sdma; |
510 | mqd->free_mqd = free_mqd_hiq_sdma; |
511 | mqd->load_mqd = kfd_load_mqd_sdma; |
512 | mqd->update_mqd = update_mqd_sdma; |
513 | mqd->destroy_mqd = kfd_destroy_mqd_sdma; |
514 | mqd->is_occupied = kfd_is_occupied_sdma; |
515 | mqd->checkpoint_mqd = checkpoint_mqd_sdma; |
516 | mqd->restore_mqd = restore_mqd_sdma; |
517 | mqd->mqd_size = sizeof(struct vi_sdma_mqd); |
518 | #if defined(CONFIG_DEBUG_FS) |
519 | mqd->debugfs_show_mqd = debugfs_show_mqd_sdma; |
520 | #endif |
521 | break; |
522 | default: |
		kfree(mqd);
524 | return NULL; |
525 | } |
526 | |
527 | return mqd; |
528 | } |
529 | |
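/*
 * Tonga variant: identical to the generic VI manager except that CP user
 * queues use the uncached, non-ATC update path (update_mqd_tonga).
 */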
530 | struct mqd_manager *mqd_manager_init_vi_tonga(enum KFD_MQD_TYPE type, |
531 | struct kfd_dev *dev) |
532 | { |
533 | struct mqd_manager *mqd; |
534 | |
535 | mqd = mqd_manager_init_vi(type, dev); |
536 | if (!mqd) |
537 | return NULL; |
538 | if (type == KFD_MQD_TYPE_CP) |
539 | mqd->update_mqd = update_mqd_tonga; |
540 | return mqd; |
541 | } |
542 | |