/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "gmc_v6_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_gem.h"

#include "bif/bif_3_0_d.h"
#include "bif/bif_3_0_sh_mask.h"
#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"
#include "gmc/gmc_6_0_d.h"
#include "gmc/gmc_6_0_sh_mask.h"
#include "dce/dce_6_0_d.h"
#include "dce/dce_6_0_sh_mask.h"
#include "si_enums.h"

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v6_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/tahiti_mc.bin");
MODULE_FIRMWARE("amdgpu/pitcairn_mc.bin");
MODULE_FIRMWARE("amdgpu/verde_mc.bin");
MODULE_FIRMWARE("amdgpu/oland_mc.bin");
MODULE_FIRMWARE("amdgpu/hainan_mc.bin");
MODULE_FIRMWARE("amdgpu/si58_mc.bin");

#define MC_SEQ_MISC0__MT__MASK  0xf0000000
#define MC_SEQ_MISC0__MT__GDDR1 0x10000000
#define MC_SEQ_MISC0__MT__DDR2  0x20000000
#define MC_SEQ_MISC0__MT__GDDR3 0x30000000
#define MC_SEQ_MISC0__MT__GDDR4 0x40000000
#define MC_SEQ_MISC0__MT__GDDR5 0x50000000
#define MC_SEQ_MISC0__MT__HBM   0x60000000
#define MC_SEQ_MISC0__MT__DDR3  0xB0000000

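/**
 * gmc_v6_0_mc_stop() - stop the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Wait for the MC to go idle, block CPU framebuffer access through the
 * BIF and put the MC into blackout mode so that it can be safely
 * reprogrammed (used around soft reset).
 */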
static void gmc_v6_0_mc_stop(struct amdgpu_device *adev)
{
        u32 blackout;

        gmc_v6_0_wait_for_idle((void *)adev);

        blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
                /* Block CPU access */
                WREG32(mmBIF_FB_EN, 0);
                /* blackout the MC */
                blackout = REG_SET_FIELD(blackout,
                                         MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
                WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
        }
        /* wait for the MC to settle */
        udelay(100);
}

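/**
 * gmc_v6_0_mc_resume() - resume the memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Take the MC out of blackout mode and re-enable CPU framebuffer reads
 * and writes through the BIF; the counterpart of gmc_v6_0_mc_stop().
 */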
static void gmc_v6_0_mc_resume(struct amdgpu_device *adev)
{
        u32 tmp;

        /* unblackout the MC */
        tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
        tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
        WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
        /* allow CPU access */
        tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
        tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
        WREG32(mmBIF_FB_EN, tmp);
}

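/**
 * gmc_v6_0_init_microcode() - request the MC firmware
 *
 * @adev: amdgpu_device pointer
 *
 * Pick the firmware image for the current ASIC (or the special si58
 * image when MC_SEQ_MISC0 reports memory configuration 0x58) and
 * request it from the firmware loader.
 *
 * Returns 0 on success, a negative error code on failure.
 */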
static int gmc_v6_0_init_microcode(struct amdgpu_device *adev)
{
        const char *chip_name;
        char fw_name[30];
        int err;
        bool is_58_fw = false;

        DRM_DEBUG("\n");

        switch (adev->asic_type) {
        case CHIP_TAHITI:
                chip_name = "tahiti";
                break;
        case CHIP_PITCAIRN:
                chip_name = "pitcairn";
                break;
        case CHIP_VERDE:
                chip_name = "verde";
                break;
        case CHIP_OLAND:
                chip_name = "oland";
                break;
        case CHIP_HAINAN:
                chip_name = "hainan";
                break;
        default:
                BUG();
        }

        /* this memory configuration requires special firmware */
        if (((RREG32(mmMC_SEQ_MISC0) & 0xff000000) >> 24) == 0x58)
                is_58_fw = true;

        if (is_58_fw)
                snprintf(fw_name, sizeof(fw_name), "amdgpu/si58_mc.bin");
        else
                snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);
        err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
        if (err) {
                dev_err(adev->dev,
                        "si_mc: Failed to load firmware \"%s\"\n",
                        fw_name);
                amdgpu_ucode_release(&adev->gmc.fw);
        }
        return err;
}

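/**
 * gmc_v6_0_mc_load_microcode() - upload the MC firmware to the hardware
 *
 * @adev: amdgpu_device pointer
 *
 * If the MC ucode engine is not already running, reset it, program the
 * IO debug register pairs and the ucode words from the firmware image,
 * restart the engine and wait for memory training to complete for both
 * D0 and D1.
 *
 * Returns 0 on success, -EINVAL if no MC firmware has been loaded.
 */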
static int gmc_v6_0_mc_load_microcode(struct amdgpu_device *adev)
{
        const __le32 *new_fw_data = NULL;
        u32 running;
        const __le32 *new_io_mc_regs = NULL;
        int i, regs_size, ucode_size;
        const struct mc_firmware_header_v1_0 *hdr;

        if (!adev->gmc.fw)
                return -EINVAL;

        hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;

        amdgpu_ucode_print_mc_hdr(&hdr->header);

        adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
        regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
        new_io_mc_regs = (const __le32 *)
                (adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
        ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
        new_fw_data = (const __le32 *)
                (adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

        running = RREG32(mmMC_SEQ_SUP_CNTL) & MC_SEQ_SUP_CNTL__RUN_MASK;

        if (running == 0) {

                /* reset the engine and set to writable */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

                /* load mc io regs */
                for (i = 0; i < regs_size; i++) {
                        WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(new_io_mc_regs++));
                        WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(new_io_mc_regs++));
                }
                /* load the MC ucode */
                for (i = 0; i < ucode_size; i++)
                        WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(new_fw_data++));

                /* put the engine back into the active state */
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
                WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

                /* wait for training to complete */
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D0_MASK)
                                break;
                        udelay(1);
                }
                for (i = 0; i < adev->usec_timeout; i++) {
                        if (RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL) & MC_SEQ_TRAIN_WAKEUP_CNTL__TRAIN_DONE_D1_MASK)
                                break;
                        udelay(1);
                }

        }

        return 0;
}

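/**
 * gmc_v6_0_vram_gtt_location() - place VRAM and GTT in the GPU address space
 *
 * @adev: amdgpu_device pointer
 * @mc: memory controller structure describing the aperture layout
 *
 * Read the physical VRAM base from MC_VM_FB_LOCATION and let the
 * common GMC helpers place the VRAM and GART apertures around it.
 */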
static void gmc_v6_0_vram_gtt_location(struct amdgpu_device *adev,
                                       struct amdgpu_gmc *mc)
{
        u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

        base <<= 24;

        amdgpu_gmc_set_agp_default(adev, mc);
        amdgpu_gmc_vram_location(adev, mc, base);
        amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
}

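/**
 * gmc_v6_0_mc_program() - program the memory controller apertures
 *
 * @adev: amdgpu_device pointer
 *
 * Clear the HDP address config registers, lock out and disable the VGA
 * aperture/renderer, and program the system aperture, default page and
 * AGP registers from the previously computed layout.
 */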
static void gmc_v6_0_mc_program(struct amdgpu_device *adev)
{
        int i, j;

        /* Initialize HDP */
        for (i = 0, j = 0; i < 32; i++, j += 0x6) {
                WREG32((0xb05 + j), 0x00000000);
                WREG32((0xb06 + j), 0x00000000);
                WREG32((0xb07 + j), 0x00000000);
                WREG32((0xb08 + j), 0x00000000);
                WREG32((0xb09 + j), 0x00000000);
        }
        WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

        if (gmc_v6_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");

        if (adev->mode_info.num_crtc) {
                u32 tmp;

                /* Lockout access through VGA aperture */
                tmp = RREG32(mmVGA_HDP_CONTROL);
                tmp |= VGA_HDP_CONTROL__VGA_MEMORY_DISABLE_MASK;
                WREG32(mmVGA_HDP_CONTROL, tmp);

                /* disable VGA render */
                tmp = RREG32(mmVGA_RENDER_CONTROL);
                tmp &= ~VGA_VSTATUS_CNTL;
                WREG32(mmVGA_RENDER_CONTROL, tmp);
        }
        /* Update configuration */
        WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
               adev->gmc.vram_start >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
               adev->gmc.vram_end >> 12);
        WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
               adev->mem_scratch.gpu_addr >> 12);
        WREG32(mmMC_VM_AGP_BASE, 0);
        WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
        WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);

        if (gmc_v6_0_wait_for_idle((void *)adev))
                dev_warn(adev->dev, "Wait for MC idle timed out!\n");
}

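/**
 * gmc_v6_0_mc_init() - determine the VRAM configuration
 *
 * @adev: amdgpu_device pointer
 *
 * Derive the memory bus width from the channel size and channel count,
 * read the VRAM size from CONFIG_MEMSIZE (in MB), resize the BAR on
 * dGPUs, pick the GART size and place the VRAM and GTT apertures.
 *
 * Returns 0 on success, a negative error code on failure.
 */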
static int gmc_v6_0_mc_init(struct amdgpu_device *adev)
{
        u32 tmp;
        int chansize, numchan;
        int r;

        tmp = RREG32(mmMC_ARB_RAMCFG);
        if (tmp & (1 << 11))
                chansize = 16;
        else if (tmp & MC_ARB_RAMCFG__CHANSIZE_MASK)
                chansize = 64;
        else
                chansize = 32;

        tmp = RREG32(mmMC_SHARED_CHMAP);
        switch ((tmp & MC_SHARED_CHMAP__NOOFCHAN_MASK) >> MC_SHARED_CHMAP__NOOFCHAN__SHIFT) {
        case 0:
        default:
                numchan = 1;
                break;
        case 1:
                numchan = 2;
                break;
        case 2:
                numchan = 4;
                break;
        case 3:
                numchan = 8;
                break;
        case 4:
                numchan = 3;
                break;
        case 5:
                numchan = 6;
                break;
        case 6:
                numchan = 10;
                break;
        case 7:
                numchan = 12;
                break;
        case 8:
                numchan = 16;
                break;
        }
        adev->gmc.vram_width = numchan * chansize;
        /* size in MB on si */
        adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
        adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

        if (!(adev->flags & AMD_IS_APU)) {
                r = amdgpu_device_resize_fb_bar(adev);
                if (r)
                        return r;
        }
        adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
        adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);
        adev->gmc.visible_vram_size = adev->gmc.aper_size;

        /* set the gart size */
        if (amdgpu_gart_size == -1) {
                switch (adev->asic_type) {
                case CHIP_HAINAN:   /* no MM engines */
                default:
                        adev->gmc.gart_size = 256ULL << 20;
                        break;
                case CHIP_VERDE:    /* UVD, VCE do not support GPUVM */
                case CHIP_TAHITI:   /* UVD, VCE do not support GPUVM */
                case CHIP_PITCAIRN: /* UVD, VCE do not support GPUVM */
                case CHIP_OLAND:    /* UVD, VCE do not support GPUVM */
                        adev->gmc.gart_size = 1024ULL << 20;
                        break;
                }
        } else {
                adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
        }

        adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
        gmc_v6_0_vram_gtt_location(adev, &adev->gmc);

        return 0;
}

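/**
 * gmc_v6_0_flush_gpu_tlb() - flush the VM TLB for a given VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM context to invalidate
 * @vmhub: hub to flush (unused, this generation has a single GFX hub)
 * @flush_type: type of flush (unused on this generation)
 */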
static void gmc_v6_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
                                   uint32_t vmhub, uint32_t flush_type)
{
        WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v6_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
                                            unsigned int vmid, uint64_t pd_addr)
{
        uint32_t reg;

        /* write new base address */
        if (vmid < 8)
                reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
        else
                reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (vmid - 8);
        amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

        /* bits 0-15 are the VM contexts0-15 */
        amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

        return pd_addr;
}

static void gmc_v6_0_get_vm_pde(struct amdgpu_device *adev, int level,
                                uint64_t *addr, uint64_t *flags)
{
        BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v6_0_get_vm_pte(struct amdgpu_device *adev,
                                struct amdgpu_bo_va_mapping *mapping,
                                uint64_t *flags)
{
        *flags &= ~AMDGPU_PTE_EXECUTABLE;
        *flags &= ~AMDGPU_PTE_PRT;
}

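/**
 * gmc_v6_0_set_fault_enable_default() - set default VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: enable/disable the default protection fault handling
 *
 * Toggle the *_PROTECTION_FAULT_ENABLE_DEFAULT bits in VM_CONTEXT1_CNTL
 * for range, dummy page, PDE0, valid, read and write faults.
 */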
static void gmc_v6_0_set_fault_enable_default(struct amdgpu_device *adev,
                                              bool value)
{
        u32 tmp;

        tmp = RREG32(mmVM_CONTEXT1_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
                            WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
        WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v6_0_set_prt() - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v6_0_set_prt(struct amdgpu_device *adev, bool enable)
{
        u32 tmp;

        if (enable && !adev->gmc.prt_warning) {
                dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
                adev->gmc.prt_warning = true;
        }

        tmp = RREG32(mmVM_PRT_CNTL);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            CB_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
                            enable);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            TC_DISABLE_FAULT_ON_UNMAPPED_ACCESS,
                            enable);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            L2_CACHE_STORE_INVALID_ENTRIES,
                            enable);
        tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
                            L1_TLB_STORE_INVALID_ENTRIES,
                            enable);
        WREG32(mmVM_PRT_CNTL, tmp);

        if (enable) {
                uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
                        AMDGPU_GPU_PAGE_SHIFT;
                uint32_t high = adev->vm_manager.max_pfn -
                        (AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);

                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
                WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
                WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
                WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
                WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
        } else {
                WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
                WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
                WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
                WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
                WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
        }
}

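/**
 * gmc_v6_0_gart_enable() - enable the GART and the VM page tables
 *
 * @adev: amdgpu_device pointer
 *
 * Program the L1 TLB and L2 cache controls, point VM context 0 at the
 * GART page table, give contexts 1-15 a valid page table base for now
 * and enable them with the configured block size.
 *
 * Returns 0 on success, -EINVAL if the GART table was never allocated.
 */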
static int gmc_v6_0_gart_enable(struct amdgpu_device *adev)
{
        uint64_t table_addr;
        u32 field;
        int i;

        if (adev->gart.bo == NULL) {
                dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
                return -EINVAL;
        }
        amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);

        table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

        /* Setup TLB control */
        WREG32(mmMC_VM_MX_L1_TLB_CNTL,
               (0xA << 7) |
               MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_TLB_MASK |
               MC_VM_MX_L1_TLB_CNTL__ENABLE_L1_FRAGMENT_PROCESSING_MASK |
               MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
               MC_VM_MX_L1_TLB_CNTL__ENABLE_ADVANCED_DRIVER_MODEL_MASK |
               (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
        /* Setup L2 cache */
        WREG32(mmVM_L2_CNTL,
               VM_L2_CNTL__ENABLE_L2_CACHE_MASK |
               VM_L2_CNTL__ENABLE_L2_FRAGMENT_PROCESSING_MASK |
               VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
               (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
        WREG32(mmVM_L2_CNTL2,
               VM_L2_CNTL2__INVALIDATE_ALL_L1_TLBS_MASK |
               VM_L2_CNTL2__INVALIDATE_L2_CACHE_MASK);

        field = adev->vm_manager.fragment_size;
        WREG32(mmVM_L2_CNTL3,
               VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
               (field << VM_L2_CNTL3__BANK_SELECT__SHIFT) |
               (field << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
        /* setup context0 */
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
        WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page_addr >> 12));
        WREG32(mmVM_CONTEXT0_CNTL2, 0);
        WREG32(mmVM_CONTEXT0_CNTL,
               VM_CONTEXT0_CNTL__ENABLE_CONTEXT_MASK |
               (0UL << VM_CONTEXT0_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
               VM_CONTEXT0_CNTL__RANGE_PROTECTION_FAULT_ENABLE_DEFAULT_MASK);

        WREG32(0x575, 0);
        WREG32(0x576, 0);
        WREG32(0x577, 0);

        /* empty context1-15 */
        /* set vm size, must be a multiple of 4 */
        WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
        WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
        /* Assign the pt base to something valid for now; the pts used for
         * the VMs are determined by the application and setup and assigned
         * on the fly in the vm part of radeon_gart.c
         */
        for (i = 1; i < AMDGPU_NUM_VMID; i++) {
                if (i < 8)
                        WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
                               table_addr >> 12);
                else
                        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
                               table_addr >> 12);
        }

        /* enable context1-15 */
        WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
               (u32)(adev->dummy_page_addr >> 12));
        WREG32(mmVM_CONTEXT1_CNTL2, 4);
        WREG32(mmVM_CONTEXT1_CNTL,
               VM_CONTEXT1_CNTL__ENABLE_CONTEXT_MASK |
               (1UL << VM_CONTEXT1_CNTL__PAGE_TABLE_DEPTH__SHIFT) |
               ((adev->vm_manager.block_size - 9)
                << VM_CONTEXT1_CNTL__PAGE_TABLE_BLOCK_SIZE__SHIFT));
        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
                gmc_v6_0_set_fault_enable_default(adev, false);
        else
                gmc_v6_0_set_fault_enable_default(adev, true);

        gmc_v6_0_flush_gpu_tlb(adev, 0, 0, 0);
        dev_info(adev->dev, "PCIE GART of %uM enabled (table at 0x%016llX).\n",
                 (unsigned int)(adev->gmc.gart_size >> 20),
                 (unsigned long long)table_addr);
        return 0;
}

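/**
 * gmc_v6_0_gart_init() - allocate the GART page table
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the common GART structure and allocate the page table in
 * VRAM; each GART page uses an 8-byte PTE.
 *
 * Returns 0 on success, a negative error code on failure.
 */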
static int gmc_v6_0_gart_init(struct amdgpu_device *adev)
{
        int r;

        if (adev->gart.bo) {
                dev_warn(adev->dev, "gmc_v6_0 PCIE GART already initialized\n");
                return 0;
        }
        r = amdgpu_gart_init(adev);
        if (r)
                return r;
        adev->gart.table_size = adev->gart.num_gpu_pages * 8;
        adev->gart.gart_pte_flags = 0;
        return amdgpu_gart_table_vram_alloc(adev);
}

static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
{
        /*unsigned i;

        for (i = 1; i < 16; ++i) {
                uint32_t reg;
                if (i < 8)
                        reg = VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i;
                else
                        reg = VM_CONTEXT8_PAGE_TABLE_BASE_ADDR + (i - 8);
                adev->vm_manager.saved_table_addr[i] = RREG32(reg);
        }*/

        /* Disable all tables */
        WREG32(mmVM_CONTEXT0_CNTL, 0);
        WREG32(mmVM_CONTEXT1_CNTL, 0);
        /* Setup TLB control */
        WREG32(mmMC_VM_MX_L1_TLB_CNTL,
               MC_VM_MX_L1_TLB_CNTL__SYSTEM_ACCESS_MODE_MASK |
               (0UL << MC_VM_MX_L1_TLB_CNTL__SYSTEM_APERTURE_UNMAPPED_ACCESS__SHIFT));
        /* Setup L2 cache */
        WREG32(mmVM_L2_CNTL,
               VM_L2_CNTL__ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               VM_L2_CNTL__ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE_MASK |
               (7UL << VM_L2_CNTL__EFFECTIVE_L2_QUEUE_SIZE__SHIFT) |
               (1UL << VM_L2_CNTL__CONTEXT1_IDENTITY_ACCESS_MODE__SHIFT));
        WREG32(mmVM_L2_CNTL2, 0);
        WREG32(mmVM_L2_CNTL3,
               VM_L2_CNTL3__L2_CACHE_BIGK_ASSOCIATIVITY_MASK |
               (0UL << VM_L2_CNTL3__L2_CACHE_BIGK_FRAGMENT_SIZE__SHIFT));
}

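/**
 * gmc_v6_0_vm_decode_fault() - print a decoded VM protection fault
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: faulting page address
 * @mc_client: memory client tag, four characters packed into a u32
 */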
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
                                     u32 status, u32 addr, u32 mc_client)
{
        u32 mc_id;
        u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
        u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                                        PROTECTIONS);
        char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
                          (mc_client >> 8) & 0xff, mc_client & 0xff, 0 };

        mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                              MEMORY_CLIENT_ID);

        dev_err(adev->dev, "VM fault (0x%02x, vmid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
                protections, vmid, addr,
                REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
                              MEMORY_CLIENT_RW) ?
                "write" : "read", block, mc_client, mc_id);
}

/*
static const u32 mc_cg_registers[] = {
        MC_HUB_MISC_HUB_CG,
        MC_HUB_MISC_SIP_CG,
        MC_HUB_MISC_VM_CG,
        MC_XPB_CLK_GAT,
        ATC_MISC_CG,
        MC_CITF_MISC_WR_CG,
        MC_CITF_MISC_RD_CG,
        MC_CITF_MISC_VM_CG,
        VM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
        MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
        MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
        MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
        MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
        ATC_MISC_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
        MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
        VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
        MC_HUB_MISC_HUB_CG__ENABLE_MASK,
        MC_HUB_MISC_SIP_CG__ENABLE_MASK,
        MC_HUB_MISC_VM_CG__ENABLE_MASK,
        MC_XPB_CLK_GAT__ENABLE_MASK,
        ATC_MISC_CG__ENABLE_MASK,
        MC_CITF_MISC_WR_CG__ENABLE_MASK,
        MC_CITF_MISC_RD_CG__ENABLE_MASK,
        MC_CITF_MISC_VM_CG__ENABLE_MASK,
        VM_L2_CG__ENABLE_MASK,
};

static void gmc_v6_0_enable_mc_ls(struct amdgpu_device *adev,
                                  bool enable)
{
        int i;
        u32 orig, data;

        for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
                orig = data = RREG32(mc_cg_registers[i]);
                if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
                        data |= mc_cg_ls_en[i];
                else
                        data &= ~mc_cg_ls_en[i];
                if (data != orig)
                        WREG32(mc_cg_registers[i], data);
        }
}

static void gmc_v6_0_enable_mc_mgcg(struct amdgpu_device *adev,
                                    bool enable)
{
        int i;
        u32 orig, data;

        for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
                orig = data = RREG32(mc_cg_registers[i]);
                if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
                        data |= mc_cg_en[i];
                else
                        data &= ~mc_cg_en[i];
                if (data != orig)
                        WREG32(mc_cg_registers[i], data);
        }
}

static void gmc_v6_0_enable_bif_mgls(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 orig, data;

        orig = data = RREG32_PCIE(ixPCIE_CNTL2);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
        } else {
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
                data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
        }

        if (orig != data)
                WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v6_0_enable_hdp_mgcg(struct amdgpu_device *adev,
                                     bool enable)
{
        u32 orig, data;

        orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
                data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
        else
                data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

        if (orig != data)
                WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v6_0_enable_hdp_ls(struct amdgpu_device *adev,
                                   bool enable)
{
        u32 orig, data;

        orig = data = RREG32(mmHDP_MEM_POWER_LS);

        if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
                data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
        else
                data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

        if (orig != data)
                WREG32(mmHDP_MEM_POWER_LS, data);
}
*/

static int gmc_v6_0_convert_vram_type(int mc_seq_vram_type)
{
        switch (mc_seq_vram_type) {
        case MC_SEQ_MISC0__MT__GDDR1:
                return AMDGPU_VRAM_TYPE_GDDR1;
        case MC_SEQ_MISC0__MT__DDR2:
                return AMDGPU_VRAM_TYPE_DDR2;
        case MC_SEQ_MISC0__MT__GDDR3:
                return AMDGPU_VRAM_TYPE_GDDR3;
        case MC_SEQ_MISC0__MT__GDDR4:
                return AMDGPU_VRAM_TYPE_GDDR4;
        case MC_SEQ_MISC0__MT__GDDR5:
                return AMDGPU_VRAM_TYPE_GDDR5;
        case MC_SEQ_MISC0__MT__DDR3:
                return AMDGPU_VRAM_TYPE_DDR3;
        default:
                return AMDGPU_VRAM_TYPE_UNKNOWN;
        }
}

static int gmc_v6_0_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v6_0_set_gmc_funcs(adev);
        gmc_v6_0_set_irq_funcs(adev);

        return 0;
}

static int gmc_v6_0_late_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
                return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
        else
                return 0;
}

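/**
 * gmc_v6_0_get_vbios_fb_size() - size of the framebuffer reserved by the VBIOS
 *
 * @adev: amdgpu_device pointer
 *
 * When VGA mode is enabled, return the fixed VGA allocation; otherwise
 * compute the size of the pre-OS framebuffer from the primary viewport
 * dimensions at 4 bytes per pixel.
 */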
static unsigned int gmc_v6_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
        u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
        unsigned int size;

        if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
                size = AMDGPU_VBIOS_VGA_ALLOCATION;
        } else {
                u32 viewport = RREG32(mmVIEWPORT_SIZE);

                size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
                        REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
                        4);
        }
        return size;
}

static int gmc_v6_0_sw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);

        if (adev->flags & AMD_IS_APU) {
                adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
        } else {
                u32 tmp = RREG32(mmMC_SEQ_MISC0);

                tmp &= MC_SEQ_MISC0__MT__MASK;
                adev->gmc.vram_type = gmc_v6_0_convert_vram_type(tmp);
        }

        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 146, &adev->gmc.vm_fault);
        if (r)
                return r;

        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 147, &adev->gmc.vm_fault);
        if (r)
                return r;

        amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

        adev->gmc.mc_mask = 0xffffffffffULL;

        r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
        if (r) {
                dev_warn(adev->dev, "No suitable DMA available.\n");
                return r;
        }
        adev->need_swiotlb = drm_need_swiotlb(40);

        r = gmc_v6_0_init_microcode(adev);
        if (r) {
                dev_err(adev->dev, "Failed to load mc firmware!\n");
                return r;
        }

        r = gmc_v6_0_mc_init(adev);
        if (r)
                return r;

        amdgpu_gmc_get_vbios_allocations(adev);

        r = amdgpu_bo_init(adev);
        if (r)
                return r;

        r = gmc_v6_0_gart_init(adev);
        if (r)
                return r;

        /*
         * number of VMs
         * VMID 0 is reserved for System
         * amdgpu graphics/compute will use VMIDs 1-7
         * amdkfd will use VMIDs 8-15
         */
        adev->vm_manager.first_kfd_vmid = 8;
        amdgpu_vm_manager_init(adev);

        /* base offset of vram pages */
        if (adev->flags & AMD_IS_APU) {
                u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

                tmp <<= 22;
                adev->vm_manager.vram_base_offset = tmp;
        } else {
                adev->vm_manager.vram_base_offset = 0;
        }

        return 0;
}

static int gmc_v6_0_sw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_gem_force_release(adev);
        amdgpu_vm_manager_fini(adev);
        amdgpu_gart_table_vram_free(adev);
        amdgpu_bo_fini(adev);
        amdgpu_ucode_release(&adev->gmc.fw);

        return 0;
}

static int gmc_v6_0_hw_init(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v6_0_mc_program(adev);

        if (!(adev->flags & AMD_IS_APU)) {
                r = gmc_v6_0_mc_load_microcode(adev);
                if (r) {
                        dev_err(adev->dev, "Failed to load MC firmware!\n");
                        return r;
                }
        }

        r = gmc_v6_0_gart_enable(adev);
        if (r)
                return r;

        if (amdgpu_emu_mode == 1)
                return amdgpu_gmc_vram_checking(adev);

        return 0;
}

static int gmc_v6_0_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
        gmc_v6_0_gart_disable(adev);

        return 0;
}

static int gmc_v6_0_suspend(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        gmc_v6_0_hw_fini(adev);

        return 0;
}

static int gmc_v6_0_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = gmc_v6_0_hw_init(adev);
        if (r)
                return r;

        amdgpu_vmid_reset_all(adev);

        return 0;
}

static bool gmc_v6_0_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
                return false;

        return true;
}

static int gmc_v6_0_wait_for_idle(void *handle)
{
        unsigned int i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (gmc_v6_0_is_idle(handle))
                        return 0;
                udelay(1);
        }
        return -ETIMEDOUT;
}

static int gmc_v6_0_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        u32 srbm_soft_reset = 0;
        u32 tmp = RREG32(mmSRBM_STATUS);

        if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
                srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

        if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
                   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
                if (!(adev->flags & AMD_IS_APU))
                        srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
                                                        SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
        }

        if (srbm_soft_reset) {
                gmc_v6_0_mc_stop(adev);
                if (gmc_v6_0_wait_for_idle(adev))
                        dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

                tmp = RREG32(mmSRBM_SOFT_RESET);
                tmp |= srbm_soft_reset;
                dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                tmp &= ~srbm_soft_reset;
                WREG32(mmSRBM_SOFT_RESET, tmp);
                tmp = RREG32(mmSRBM_SOFT_RESET);

                udelay(50);

                gmc_v6_0_mc_resume(adev);
                udelay(50);
        }

        return 0;
}

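/**
 * gmc_v6_0_vm_fault_interrupt_state() - enable/disable VM fault interrupts
 *
 * @adev: amdgpu_device pointer
 * @src: interrupt source this callback belongs to
 * @type: interrupt type (unused, there is only one)
 * @state: AMDGPU_IRQ_STATE_ENABLE or AMDGPU_IRQ_STATE_DISABLE
 *
 * Set or clear the protection fault interrupt enable bits in both
 * VM_CONTEXT0_CNTL and VM_CONTEXT1_CNTL.
 */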
static int gmc_v6_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
                                             struct amdgpu_irq_src *src,
                                             unsigned int type,
                                             enum amdgpu_interrupt_state state)
{
        u32 tmp;
        u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
                    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

        switch (state) {
        case AMDGPU_IRQ_STATE_DISABLE:
                tmp = RREG32(mmVM_CONTEXT0_CNTL);
                tmp &= ~bits;
                WREG32(mmVM_CONTEXT0_CNTL, tmp);
                tmp = RREG32(mmVM_CONTEXT1_CNTL);
                tmp &= ~bits;
                WREG32(mmVM_CONTEXT1_CNTL, tmp);
                break;
        case AMDGPU_IRQ_STATE_ENABLE:
                tmp = RREG32(mmVM_CONTEXT0_CNTL);
                tmp |= bits;
                WREG32(mmVM_CONTEXT0_CNTL, tmp);
                tmp = RREG32(mmVM_CONTEXT1_CNTL);
                tmp |= bits;
                WREG32(mmVM_CONTEXT1_CNTL, tmp);
                break;
        default:
                break;
        }

        return 0;
}

static int gmc_v6_0_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        u32 addr, status;

        addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
        status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
        WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

        if (!addr && !status)
                return 0;

        if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
                gmc_v6_0_set_fault_enable_default(adev, false);

        if (printk_ratelimit()) {
                dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
                        entry->src_id, entry->src_data[0]);
                dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_ADDR   0x%08X\n",
                        addr);
                dev_err(adev->dev, "  VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
                        status);
                gmc_v6_0_vm_decode_fault(adev, status, addr, 0);
        }

        return 0;
}

static int gmc_v6_0_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int gmc_v6_0_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs gmc_v6_0_ip_funcs = {
        .name = "gmc_v6_0",
        .early_init = gmc_v6_0_early_init,
        .late_init = gmc_v6_0_late_init,
        .sw_init = gmc_v6_0_sw_init,
        .sw_fini = gmc_v6_0_sw_fini,
        .hw_init = gmc_v6_0_hw_init,
        .hw_fini = gmc_v6_0_hw_fini,
        .suspend = gmc_v6_0_suspend,
        .resume = gmc_v6_0_resume,
        .is_idle = gmc_v6_0_is_idle,
        .wait_for_idle = gmc_v6_0_wait_for_idle,
        .soft_reset = gmc_v6_0_soft_reset,
        .set_clockgating_state = gmc_v6_0_set_clockgating_state,
        .set_powergating_state = gmc_v6_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v6_0_gmc_funcs = {
        .flush_gpu_tlb = gmc_v6_0_flush_gpu_tlb,
        .emit_flush_gpu_tlb = gmc_v6_0_emit_flush_gpu_tlb,
        .set_prt = gmc_v6_0_set_prt,
        .get_vm_pde = gmc_v6_0_get_vm_pde,
        .get_vm_pte = gmc_v6_0_get_vm_pte,
        .get_vbios_fb_size = gmc_v6_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v6_0_irq_funcs = {
        .set = gmc_v6_0_vm_fault_interrupt_state,
        .process = gmc_v6_0_process_interrupt,
};

static void gmc_v6_0_set_gmc_funcs(struct amdgpu_device *adev)
{
        adev->gmc.gmc_funcs = &gmc_v6_0_gmc_funcs;
}

static void gmc_v6_0_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->gmc.vm_fault.num_types = 1;
        adev->gmc.vm_fault.funcs = &gmc_v6_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v6_0_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_GMC,
        .major = 6,
        .minor = 0,
        .rev = 0,
        .funcs = &gmc_v6_0_ip_funcs,
};