// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "kfd_pm4_opcodes.h"
#include "gc/gc_10_1_0_sh_mask.h"

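/*
 * pm_map_process_v9() - fill a MAP_PROCESS packet for the GFX9 HWS.
 *
 * Describes one process to the hardware scheduler: PASID, scheduling
 * quantum, queue counts, GDS allocation, SH memory configuration, trap
 * handler addresses and the VM page table base, so the HWS can map the
 * process's queues onto a VMID.
 */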
static int pm_map_process_v9(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;
	struct kfd_node *kfd = pm->dqm->dev;
	struct kfd_process_device *pdd =
			container_of(qpd, struct kfd_process_device, qpd);

	packet = (struct pm4_mes_map_process *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process));
	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
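	/* The 10-bit GDS size is split across two packet fields:
	 * the low 6 bits in gds_size, the high 4 bits in gds_size_hi.
	 */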
	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
	packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	if (kfd->dqm->trap_debug_vmid && pdd->process->debug_trap_enabled &&
			pdd->process->runtime_info.runtime_state == DEBUG_RUNTIME_STATE_ENABLED) {
		packet->bitfields2.debug_vmid = kfd->dqm->trap_debug_vmid;
		packet->bitfields2.new_debug = 1;
	}

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
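		/* The SQ_SHADER_TBA/TMA registers hold a 256-byte-aligned
		 * address, hence the shift by 8 before splitting into the
		 * lo/hi halves.
		 */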
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		/* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
		 * not defined, so setting it won't do any harm.
		 */
		packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
				| 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;

		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}

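/*
 * pm_map_process_aldebaran() - Aldebaran variant of MAP_PROCESS.
 *
 * In addition to the common fields, the Aldebaran packet carries
 * per-process debug state: the SPI debug override/launch mode and,
 * when trap debugging is enabled, the TCP watch-point control values.
 */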
static int pm_map_process_aldebaran(struct packet_manager *pm,
		uint32_t *buffer, struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process_aldebaran *packet;
	uint64_t vm_page_table_base_addr = qpd->page_table_base;
	struct kfd_dev *kfd = pm->dqm->dev->kfd;
	struct kfd_process_device *pdd =
			container_of(qpd, struct kfd_process_device, qpd);
	int i;

	packet = (struct pm4_mes_map_process_aldebaran *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
			sizeof(struct pm4_mes_map_process_aldebaran));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 10;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
	packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
	packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
	packet->bitfields14.num_oac = qpd->num_oac;
	packet->bitfields14.sdma_enable = 1;
	packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;
	packet->spi_gdbg_per_vmid_cntl = pdd->spi_dbg_override |
						pdd->spi_dbg_launch_mode;

	if (pdd->process->debug_trap_enabled) {
		for (i = 0; i < kfd->device_info.num_of_watch_points; i++)
			packet->tcp_watch_cntl[i] = pdd->watch_points[i];

		packet->bitfields2.single_memops =
			!!(pdd->process->dbg_flags & KFD_DBG_TRAP_FLAG_SINGLE_MEM_OP);
	}

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	if (qpd->tba_addr) {
		packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8);
		packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
		packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
	}

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	packet->vm_context_page_table_base_addr_lo32 =
			lower_32_bits(vm_page_table_base_addr);
	packet->vm_context_page_table_base_addr_hi32 =
			upper_32_bits(vm_page_table_base_addr);

	return 0;
}

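/*
 * pm_runlist_v9() - build a RUN_LIST packet.
 *
 * Points the HWS at an indirect buffer of @ib_size_in_dwords dwords at
 * GPU address @ib; when @chain is set, the new runlist is chained to
 * the currently executing one instead of ending it.
 */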
static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;

	int concurrent_proc_cnt = 0;
	struct kfd_node *kfd = pm->dqm->dev;

	/* Determine the number of processes to map together to HW:
	 * it cannot exceed the number of VMIDs available to the
	 * scheduler, so take the smaller of the number of processes
	 * in the runlist and the kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has already been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->ib_base_hi = upper_32_bits(ib);

	return 0;
}

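/*
 * pm_set_resources_v9() - build a SET_RESOURCES packet.
 *
 * Tells the HWS which VMIDs, queue slots, GWS entries, OACs and GDS
 * heap range it may hand out to user processes.
 */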
static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;

	packet = (struct pm4_mes_set_resources *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
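	/* The unmap_latency field is expressed in units of 100 ms,
	 * hence the division below.
	 */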
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	return 0;
}

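/*
 * SDMA v5.2 and newer select SDMA queues through the packet's
 * extended_engine_sel field rather than the legacy engine_sel values;
 * this helper decides which scheme applies.
 */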
static inline bool pm_use_ext_eng(struct kfd_dev *dev)
{
	return amdgpu_ip_version(dev->adev, SDMA0_HWIP, 0) >=
	       IP_VERSION(5, 2, 0);
}

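/*
 * pm_map_queues_v9() - build a MAP_QUEUES packet for a single queue.
 *
 * Picks the engine and queue type from the queue properties and points
 * the HWS at the queue's MQD and write pointer addresses.
 */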
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
		struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
	packet->bitfields2.extended_engine_sel =
		extended_engine_sel__mes_map_queues__legacy_engine_sel;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		use_static = false; /* no static queues under SDMA */
		if (q->properties.sdma_engine_id < 2 &&
		    !pm_use_ext_eng(q->device->kfd))
			packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		else {
			/*
			 * For GFX9.4.3, the SDMA engine id can be 8 or
			 * greater. For such cases, select the sdma8_to_15
			 * range in extended_engine_sel and keep engine_sel
			 * within 0-7.
			 */
			if (q->properties.sdma_engine_id >= 8)
				packet->bitfields2.extended_engine_sel =
				extended_engine_sel__mes_map_queues__sdma8_to_15_sel;
			else
				packet->bitfields2.extended_engine_sel =
				extended_engine_sel__mes_map_queues__sdma0_to_7_sel;

			packet->bitfields2.engine_sel = q->properties.sdma_engine_id % 8;
		}
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}

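/*
 * pm_set_grace_period_v9() - program the preemption grace period.
 *
 * Emits a WRITE_DATA packet to the compute wait-times register. The
 * register offset and encoded value come from the kgd2kfd backend;
 * USE_DEFAULT_GRACE_PERIOD writes back the value cached in
 * pm->dqm->wait_times instead.
 */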
static int pm_set_grace_period_v9(struct packet_manager *pm,
		uint32_t *buffer,
		uint32_t grace_period)
{
	struct pm4_mec_write_data_mmio *packet;
	uint32_t reg_offset = 0;
	uint32_t reg_data = 0;

	pm->dqm->dev->kfd2kgd->build_grace_period_packet_info(
			pm->dqm->dev->adev,
			pm->dqm->wait_times,
			grace_period,
			&reg_offset,
			&reg_data);

	if (grace_period == USE_DEFAULT_GRACE_PERIOD)
		reg_data = pm->dqm->wait_times;

	packet = (struct pm4_mec_write_data_mmio *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mec_write_data_mmio));

	packet->header.u32All = pm_build_pm4_header(IT_WRITE_DATA,
					sizeof(struct pm4_mec_write_data_mmio));

	packet->bitfields2.dst_sel = dst_sel___write_data__mem_mapped_register;
	packet->bitfields2.addr_incr =
			addr_incr___write_data__do_not_increment_address;

	packet->bitfields3.dst_mmreg_addr = reg_offset;

	packet->data = reg_data;

	return 0;
}

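/*
 * pm_unmap_queues_v9() - build an UNMAP_QUEUES packet.
 *
 * Preempts (or, when @reset is set, resets) the queues selected by
 * @filter: the queues of a single PASID, all queues, or all dynamic
 * (non-static) queues.
 */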
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));

	packet->bitfields2.extended_engine_sel =
		pm_use_ext_eng(pm->dqm->dev->kfd) ?
			extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel :
			extended_engine_sel__mes_unmap_queues__legacy_engine_sel;

	packet->bitfields2.engine_sel =
		engine_sel__mes_unmap_queues__compute;

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}

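/*
 * pm_query_status_v9() - build a QUERY_STATUS packet.
 *
 * Asks the firmware to write @fence_value to @fence_address once the
 * preceding packets have been processed, so the driver can poll the
 * fence to tell when a runlist change has taken effect.
 */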
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint64_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}

const struct packet_manager_funcs kfd_v9_pm_funcs = {
	.map_process = pm_map_process_v9,
	.runlist = pm_runlist_v9,
	.set_resources = pm_set_resources_v9,
	.map_queues = pm_map_queues_v9,
	.unmap_queues = pm_unmap_queues_v9,
	.set_grace_period = pm_set_grace_period_v9,
	.query_status = pm_query_status_v9,
	.release_mem = NULL,
	.map_process_size = sizeof(struct pm4_mes_map_process),
	.runlist_size = sizeof(struct pm4_mes_runlist),
	.set_resources_size = sizeof(struct pm4_mes_set_resources),
	.map_queues_size = sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
	.set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
	.query_status_size = sizeof(struct pm4_mes_query_status),
	.release_mem_size = 0,
};

const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
	.map_process = pm_map_process_aldebaran,
	.runlist = pm_runlist_v9,
	.set_resources = pm_set_resources_v9,
	.map_queues = pm_map_queues_v9,
	.unmap_queues = pm_unmap_queues_v9,
	.set_grace_period = pm_set_grace_period_v9,
	.query_status = pm_query_status_v9,
	.release_mem = NULL,
	.map_process_size = sizeof(struct pm4_mes_map_process_aldebaran),
	.runlist_size = sizeof(struct pm4_mes_runlist),
	.set_resources_size = sizeof(struct pm4_mes_set_resources),
	.map_queues_size = sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
	.set_grace_period_size = sizeof(struct pm4_mec_write_data_mmio),
	.query_status_size = sizeof(struct pm4_mes_query_status),
	.release_mem_size = 0,
};