/*
 * Copyright 2021 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v11_structs.h"
#include "gc/gc_11_0_0_offset.h"
#include "gc/gc_11_0_0_sh_mask.h"
#include "amdgpu_amdkfd.h"

static inline struct v11_compute_mqd *get_mqd(void *mqd)
{
	return (struct v11_compute_mqd *)mqd;
}

static inline struct v11_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v11_sdma_mqd *)mqd;
}

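/*
 * Program the per-SE CU masks in the MQD. When a debug workaround flag is
 * set, write 0xffff (restrict each SE to its lower 16 CUs) or 0xffffffff
 * (re-enable all CUs) instead of mapping the user-supplied CU mask
 * symmetrically across the shader engines.
 */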
static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			   struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};
	bool has_wa_flag = minfo && (minfo->update_flag & (UPDATE_FLAG_DBG_WA_ENABLE |
			UPDATE_FLAG_DBG_WA_DISABLE));

	if (!minfo || !(has_wa_flag || minfo->cu_mask.ptr))
		return;

	m = get_mqd(mqd);

	if (has_wa_flag) {
		uint32_t wa_mask =
			(minfo->update_flag & UPDATE_FLAG_DBG_WA_ENABLE) ? 0xffff : 0xffffffff;

		m->compute_static_thread_mgmt_se0 = wa_mask;
		m->compute_static_thread_mgmt_se1 = wa_mask;
		m->compute_static_thread_mgmt_se2 = wa_mask;
		m->compute_static_thread_mgmt_se3 = wa_mask;
		m->compute_static_thread_mgmt_se4 = wa_mask;
		m->compute_static_thread_mgmt_se5 = wa_mask;
		m->compute_static_thread_mgmt_se6 = wa_mask;
		m->compute_static_thread_mgmt_se7 = wa_mask;

		return;
	}

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask, 0);

	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

static void set_priority(struct v11_compute_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_node *node,
		struct queue_properties *q)
{
	struct kfd_mem_obj *mqd_mem_obj;
	int size;

	/*
	 * MES writes to areas beyond the MQD size, so allocate one
	 * PAGE_SIZE of memory for the MQD if MES is enabled.
	 */
	if (node->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_compute_mqd);

	if (kfd_gtt_sa_allocate(node, size, &mqd_mem_obj))
		return NULL;

	return mqd_mem_obj;
}

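/*
 * One-time initialization of a compute MQD: zero the buffer, program the
 * static fields (header, preload, quantum, debugger and CWSR state), then
 * apply the dynamic queue properties via update_mqd().
 */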
static void init_mqd(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	uint64_t addr;
	struct v11_compute_mqd *m;
	int size;
	uint32_t wa_mask = q->is_dbg_wa ? 0xffff : 0xffffffff;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	if (mm->dev->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_compute_mqd);

	memset(m, 0, size);

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;

	m->compute_static_thread_mgmt_se0 = wa_mask;
	m->compute_static_thread_mgmt_se1 = wa_mask;
	m->compute_static_thread_mgmt_se2 = wa_mask;
	m->compute_static_thread_mgmt_se3 = wa_mask;
	m->compute_static_thread_mgmt_se4 = wa_mask;
	m->compute_static_thread_mgmt_se5 = wa_mask;
	m->compute_static_thread_mgmt_se6 = wa_mask;
	m->compute_static_thread_mgmt_se7 = wa_mask;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x55 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__UNORD_DISPATCH_MASK;
	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	/* Set cp_hqd_hq_scheduler0 bit 14 to 1 to have the CP set up the
	 * DISPATCH_PTR. This is required for the KFD debugger.
	 */
	m->cp_hqd_hq_status0 = 1 << 14;

	/*
	 * GFX11 RS64 CPFW version >= 509 supports acknowledgment of PCIe
	 * atomics support.
	 */
	if (amdgpu_amdkfd_have_atomics_support(mm->dev->adev))
		m->cp_hqd_hq_status0 |= 1 << 29;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (mm->dev->kfd->cwsr_enabled) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

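/*
 * Hand the MQD to the hardware queue descriptor (HQD) registers through the
 * kfd2kgd interface. wptr_shift converts write-pointer units: an AQL wptr
 * counts 64-byte packets, so shifting left by 4 yields dwords (16 dwords
 * per packet); PM4/CP queues already count in dwords.
 */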
static int load_mqd(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms)
{
	int r = 0;
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	r = mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms, 0);
	return r;
}

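/*
 * Refresh the dynamic MQD fields from the queue properties: ring base and
 * size, rptr/wptr polling addresses, doorbell, EOP buffer, AQL control bits,
 * CU mask and priority. The QUEUE_SIZE field is an order-of-two encoding;
 * e.g. a 4 KiB ring is 1024 dwords, so ffs(1024) - 1 - 1 = 9 is written.
 */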
static void update_mqd(struct mqd_manager *mm, void *mqd,
		       struct queue_properties *q,
		       struct mqd_update_info *minfo)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

	m->cp_hqd_pq_control &= ~CP_HQD_PQ_CONTROL__QUEUE_SIZE_MASK;
	m->cp_hqd_pq_control |=
			ffs(q->queue_size / sizeof(unsigned int)) - 1 - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control = 3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
	m->cp_hqd_eop_control = min(0xA,
		ffs(q->eop_ring_buffer_size / sizeof(unsigned int)) - 1 - 1);
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		/* GC 10 removed WPP_CLAMP from PQ Control */
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |=
			1 << CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}
	if (mm->dev->kfd->cwsr_enabled)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

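/*
 * The CP records the doorbell ID of a queue that failed to preempt in the
 * HIQ MQD's queue_doorbell_id0, so a non-zero value indicates a preemption
 * failure; kfd_check_hiq_mqd_doorbell_id() performs the check and reporting.
 */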
static bool check_preemption_failed(struct mqd_manager *mm, void *mqd)
{
	struct v11_compute_mqd *m = (struct v11_compute_mqd *)mqd;

	return kfd_check_hiq_mqd_doorbell_id(mm->dev, m->queue_doorbell_id0, 0);
}

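/*
 * Report how much of the control stack and context save area a preempted
 * queue used. Only the header is copied to user mode; the data itself is
 * already in the user-accessible context save area.
 */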
static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  struct queue_properties *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v11_compute_mqd *m;
	struct kfd_context_save_area_header header;

	m = get_mqd(mqd);

	/* Control stack is written backwards, while workgroup context data
	 * is written forwards. Both start at m->cp_hqd_cntl_stack_size.
	 * Current position is at m->cp_hqd_cntl_stack_offset and
	 * m->cp_hqd_wg_state_offset, respectively.
	 */
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	/* Control stack is not copied to user mode for GFXv11 because
	 * it's part of the context save area that is already
	 * accessible to user mode.
	 */
	header.wave_state.control_stack_size = *ctl_stack_used_size;
	header.wave_state.wave_state_size = *save_area_used_size;

	header.wave_state.wave_state_offset = m->cp_hqd_wg_state_offset;
	header.wave_state.control_stack_offset = m->cp_hqd_cntl_stack_offset;

	if (copy_to_user(ctl_stack, &header, sizeof(header.wave_state)))
		return -EFAULT;

	return 0;
}

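/*
 * Snapshot the MQD for checkpoint/restore. The control stack is not copied
 * here (ctl_stack_dst is unused) because on GFXv11 it lives in the
 * user-mode context save area.
 */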
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v11_compute_mqd *m;

	m = get_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v11_compute_mqd));
}

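/*
 * Rebuild an MQD from a checkpoint image: copy the saved MQD into the new
 * allocation, re-program the doorbell offset for the restored queue, and
 * leave the queue inactive until it is explicitly mapped again.
 */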
static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v11_compute_mqd *m;

	m = (struct v11_compute_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}

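/*
 * A HIQ MQD is a regular compute MQD with PRIV_STATE and KMD_QUEUE set so
 * the CP treats the ring as a privileged, kernel-owned queue.
 */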
static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v11_compute_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

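/*
 * Unmap the HIQ through amdgpu_amdkfd_unmap_hiq(); the queue is identified
 * by the doorbell offset recorded in its MQD.
 */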
static int destroy_hiq_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type, unsigned int timeout,
			uint32_t pipe_id, uint32_t queue_id)
{
	int err;
	struct v11_compute_mqd *m;
	u32 doorbell_off;

	m = get_mqd(mqd);

	doorbell_off = m->cp_hqd_pq_doorbell_control >>
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;

	err = amdgpu_amdkfd_unmap_hiq(mm->dev->adev, doorbell_off, 0);
	if (err)
		pr_debug("Destroy HIQ MQD failed: %d\n", err);

	return err;
}

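/*
 * SDMA MQDs carry no static defaults beyond zero; every field is programmed
 * from the queue properties in update_mqd_sdma().
 */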
static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v11_sdma_mqd *m;
	int size;

	m = (struct v11_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	if (mm->dev->kfd->shared_resources.enable_mes)
		size = PAGE_SIZE;
	else
		size = sizeof(struct v11_sdma_mqd);

	memset(m, 0, size);
	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

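/*
 * Program the SDMA RLC queue state: ring size (order-of-two encoded), VMID,
 * rptr write-back and wptr polling in RB_CNTL, ring and pointer addresses,
 * doorbell offset, and the context-switch quantum.
 */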
static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
		struct queue_properties *q,
		struct mqd_update_info *minfo)
{
	struct v11_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
	m->sdmax_rlcx_rb_cntl = (ffs(q->queue_size / sizeof(unsigned int)) - 1)
		<< SDMA0_QUEUE0_RB_CNTL__RB_SIZE__SHIFT |
		q->vmid << SDMA0_QUEUE0_RB_CNTL__RB_VMID__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
		6 << SDMA0_QUEUE0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT |
		1 << SDMA0_QUEUE0_RB_CNTL__F32_WPTR_POLL_ENABLE__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_rb_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_QUEUE0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdmax_rlcx_sched_cntl = (amdgpu_sdma_phase_quantum
		<< SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM__SHIFT)
		& SDMA0_QUEUE0_SCHEDULE_CNTL__CONTEXT_QUANTUM_MASK;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_compute_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v11_sdma_mqd), false);
	return 0;
}

#endif

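/*
 * Instantiate an MQD manager for one queue type (CP, HIQ, DIQ or SDMA),
 * wiring up the type-specific allocate/init/load/update/destroy callbacks.
 */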
struct mqd_manager *mqd_manager_init_v11(enum KFD_MQD_TYPE type,
		struct kfd_node *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->get_wave_state = get_wave_state;
		mqd->mqd_stride = kfd_mqd_stride;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_HIQ:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_hiq_mqd;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->check_preemption_failed = check_preemption_failed;
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = kfd_free_mqd_cp;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = kfd_destroy_mqd_cp;
		mqd->is_occupied = kfd_is_occupied_cp;
		mqd->mqd_size = sizeof(struct v11_compute_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		pr_debug("%s@%i\n", __func__, __LINE__);
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = kfd_load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = kfd_destroy_mqd_sdma;
		mqd->is_occupied = kfd_is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
		mqd->mqd_size = sizeof(struct v11_sdma_mqd);
		mqd->mqd_stride = kfd_mqd_stride;
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		/*
		 * When MES is enabled, allocate SDMA MQDs with the generic
		 * functions, since MES needs a full page per MQD.
		 */
		if (dev->kfd->shared_resources.enable_mes) {
			mqd->allocate_mqd = allocate_mqd;
			mqd->free_mqd = kfd_free_mqd_cp;
		}
		pr_debug("%s@%i\n", __func__, __LINE__);
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}