/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/firmware.h>
#include <linux/module.h>
#include <linux/pci.h>

#include <drm/drm_cache.h>
#include "amdgpu.h"
#include "cikd.h"
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
#include "amdgpu_amdkfd.h"
#include "amdgpu_gem.h"

#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"

#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"

#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"

#include "dce/dce_8_0_d.h"
#include "dce/dce_8_0_sh_mask.h"

#include "amdgpu_atombios.h"

#include "ivsrcid/ivsrcid_vislands30.h"
static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev);
static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
static int gmc_v7_0_wait_for_idle(void *handle);

MODULE_FIRMWARE("amdgpu/bonaire_mc.bin");
MODULE_FIRMWARE("amdgpu/hawaii_mc.bin");
MODULE_FIRMWARE("amdgpu/topaz_mc.bin");

static const u32 golden_settings_iceland_a11[] = {
	mmVM_PRT_APERTURE0_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE1_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE2_LOW_ADDR, 0x0fffffff, 0x0fffffff,
	mmVM_PRT_APERTURE3_LOW_ADDR, 0x0fffffff, 0x0fffffff
};

static const u32 iceland_mgcg_cgcg_init[] = {
	mmMC_MEM_POWER_LS, 0xffffffff, 0x00000104
};

static void gmc_v7_0_init_golden_registers(struct amdgpu_device *adev)
{
	switch (adev->asic_type) {
	case CHIP_TOPAZ:
		amdgpu_device_program_register_sequence(adev,
							iceland_mgcg_cgcg_init,
							ARRAY_SIZE(iceland_mgcg_cgcg_init));
		amdgpu_device_program_register_sequence(adev,
							golden_settings_iceland_a11,
							ARRAY_SIZE(golden_settings_iceland_a11));
		break;
	default:
		break;
	}
}

static void gmc_v7_0_mc_stop(struct amdgpu_device *adev)
{
	u32 blackout;

	gmc_v7_0_wait_for_idle((void *)adev);

	blackout = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	if (REG_GET_FIELD(blackout, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE) != 1) {
		/* Block CPU access */
		WREG32(mmBIF_FB_EN, 0);
		/* blackout the MC */
		blackout = REG_SET_FIELD(blackout,
					 MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
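		/* clearing the field above and then ORing in bit 0 sets
		 * BLACKOUT_MODE to 1 while preserving the other bits
		 */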
		WREG32(mmMC_SHARED_BLACKOUT_CNTL, blackout | 1);
	}
	/* wait for the MC to settle */
	udelay(100);
}

static void gmc_v7_0_mc_resume(struct amdgpu_device *adev)
{
	u32 tmp;

	/* unblackout the MC */
	tmp = RREG32(mmMC_SHARED_BLACKOUT_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_SHARED_BLACKOUT_CNTL, BLACKOUT_MODE, 0);
	WREG32(mmMC_SHARED_BLACKOUT_CNTL, tmp);
	/* allow CPU access */
	tmp = REG_SET_FIELD(0, BIF_FB_EN, FB_READ_EN, 1);
	tmp = REG_SET_FIELD(tmp, BIF_FB_EN, FB_WRITE_EN, 1);
	WREG32(mmBIF_FB_EN, tmp);
}

/**
 * gmc_v7_0_init_microcode - load ucode images from disk
 *
 * @adev: amdgpu_device pointer
 *
 * Use the firmware interface to load the ucode images into
 * the driver (not loaded into hw).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_init_microcode(struct amdgpu_device *adev)
{
	const char *chip_name;
	char fw_name[30];
	int err;

	DRM_DEBUG("\n");

	switch (adev->asic_type) {
	case CHIP_BONAIRE:
		chip_name = "bonaire";
		break;
	case CHIP_HAWAII:
		chip_name = "hawaii";
		break;
	case CHIP_TOPAZ:
		chip_name = "topaz";
		break;
	case CHIP_KAVERI:
	case CHIP_KABINI:
	case CHIP_MULLINS:
		return 0;
	default:
		return -EINVAL;
	}

	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_mc.bin", chip_name);

	err = amdgpu_ucode_request(adev, &adev->gmc.fw, fw_name);
	if (err) {
		pr_err("cik_mc: Failed to load firmware \"%s\"\n", fw_name);
		amdgpu_ucode_release(&adev->gmc.fw);
	}
	return err;
}

/**
 * gmc_v7_0_mc_load_microcode - load MC ucode into the hw
 *
 * @adev: amdgpu_device pointer
 *
 * Load the GDDR MC ucode into the hw (CIK).
 * Returns 0 on success, error on failure.
 */
static int gmc_v7_0_mc_load_microcode(struct amdgpu_device *adev)
{
	const struct mc_firmware_header_v1_0 *hdr;
	const __le32 *fw_data = NULL;
	const __le32 *io_mc_regs = NULL;
	u32 running;
	int i, ucode_size, regs_size;

	if (!adev->gmc.fw)
		return -EINVAL;

	hdr = (const struct mc_firmware_header_v1_0 *)adev->gmc.fw->data;
	amdgpu_ucode_print_mc_hdr(&hdr->header);

	adev->gmc.fw_version = le32_to_cpu(hdr->header.ucode_version);
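	/* the io_debug payload is (index, data) register pairs,
	 * i.e. two 32-bit words per entry
	 */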
	regs_size = le32_to_cpu(hdr->io_debug_size_bytes) / (4 * 2);
	io_mc_regs = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->io_debug_array_offset_bytes));
	ucode_size = le32_to_cpu(hdr->header.ucode_size_bytes) / 4;
	fw_data = (const __le32 *)
		(adev->gmc.fw->data + le32_to_cpu(hdr->header.ucode_array_offset_bytes));

	running = REG_GET_FIELD(RREG32(mmMC_SEQ_SUP_CNTL), MC_SEQ_SUP_CNTL, RUN);

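	/* only load the ucode if the MC sequencer is not already
	 * running (e.g. started by the vBIOS)
	 */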
	if (running == 0) {
		/* reset the engine and set to writable */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000010);

		/* load mc io regs */
		for (i = 0; i < regs_size; i++) {
			WREG32(mmMC_SEQ_IO_DEBUG_INDEX, le32_to_cpup(io_mc_regs++));
			WREG32(mmMC_SEQ_IO_DEBUG_DATA, le32_to_cpup(io_mc_regs++));
		}
		/* load the MC ucode */
		for (i = 0; i < ucode_size; i++)
			WREG32(mmMC_SEQ_SUP_PGM, le32_to_cpup(fw_data++));

		/* put the engine back into the active state */
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000008);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000004);
		WREG32(mmMC_SEQ_SUP_CNTL, 0x00000001);

		/* wait for training to complete */
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D0))
				break;
			udelay(1);
		}
		for (i = 0; i < adev->usec_timeout; i++) {
			if (REG_GET_FIELD(RREG32(mmMC_SEQ_TRAIN_WAKEUP_CNTL),
					  MC_SEQ_TRAIN_WAKEUP_CNTL, TRAIN_DONE_D1))
				break;
			udelay(1);
		}
	}

	return 0;
}

static void gmc_v7_0_vram_gtt_location(struct amdgpu_device *adev,
				       struct amdgpu_gmc *mc)
{
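	/* the FB base lives in bits 15:0 of MC_VM_FB_LOCATION,
	 * expressed in units of 16 MB (hence the shift by 24)
	 */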
	u64 base = RREG32(mmMC_VM_FB_LOCATION) & 0xFFFF;

	base <<= 24;

	amdgpu_gmc_set_agp_default(adev, mc);
	amdgpu_gmc_vram_location(adev, mc, base);
	amdgpu_gmc_gart_location(adev, mc, AMDGPU_GART_PLACEMENT_BEST_FIT);
}

/**
 * gmc_v7_0_mc_program - program the GPU memory controller
 *
 * @adev: amdgpu_device pointer
 *
 * Set the location of vram, gart, and AGP in the GPU's
 * physical address space (CIK).
 */
static void gmc_v7_0_mc_program(struct amdgpu_device *adev)
{
	u32 tmp;
	int i, j;

	/* Initialize HDP */
	for (i = 0, j = 0; i < 32; i++, j += 0x6) {
		WREG32((0xb05 + j), 0x00000000);
		WREG32((0xb06 + j), 0x00000000);
		WREG32((0xb07 + j), 0x00000000);
		WREG32((0xb08 + j), 0x00000000);
		WREG32((0xb09 + j), 0x00000000);
	}
	WREG32(mmHDP_REG_COHERENCY_FLUSH_CNTL, 0);

	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	if (adev->mode_info.num_crtc) {
		/* Lockout access through VGA aperture */
		tmp = RREG32(mmVGA_HDP_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_HDP_CONTROL, VGA_MEMORY_DISABLE, 1);
		WREG32(mmVGA_HDP_CONTROL, tmp);

		/* disable VGA render */
		tmp = RREG32(mmVGA_RENDER_CONTROL);
		tmp = REG_SET_FIELD(tmp, VGA_RENDER_CONTROL, VGA_VSTATUS_CNTL, 0);
		WREG32(mmVGA_RENDER_CONTROL, tmp);
	}
	/* Update configuration */
	WREG32(mmMC_VM_SYSTEM_APERTURE_LOW_ADDR,
	       adev->gmc.vram_start >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
	       adev->gmc.vram_end >> 12);
	WREG32(mmMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR,
	       adev->mem_scratch.gpu_addr >> 12);
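	/* the AGP aperture registers are programmed in 4 MB units,
	 * hence the shift by 22
	 */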
	WREG32(mmMC_VM_AGP_BASE, 0);
	WREG32(mmMC_VM_AGP_TOP, adev->gmc.agp_end >> 22);
	WREG32(mmMC_VM_AGP_BOT, adev->gmc.agp_start >> 22);
	if (gmc_v7_0_wait_for_idle((void *)adev))
		dev_warn(adev->dev, "Wait for MC idle timed out!\n");

	WREG32(mmBIF_FB_EN, BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);

	tmp = RREG32(mmHDP_MISC_CNTL);
	tmp = REG_SET_FIELD(tmp, HDP_MISC_CNTL, FLUSH_INVALIDATE_CACHE, 0);
	WREG32(mmHDP_MISC_CNTL, tmp);

	tmp = RREG32(mmHDP_HOST_PATH_CNTL);
	WREG32(mmHDP_HOST_PATH_CNTL, tmp);
}

/**
 * gmc_v7_0_mc_init - initialize the memory controller driver params
 *
 * @adev: amdgpu_device pointer
 *
 * Look up the amount of vram, vram width, and decide how to place
 * vram and gart within the GPU's physical address space (CIK).
 * Returns 0 for success.
 */
static int gmc_v7_0_mc_init(struct amdgpu_device *adev)
{
	int r;

	adev->gmc.vram_width = amdgpu_atombios_get_vram_width(adev);
	if (!adev->gmc.vram_width) {
		u32 tmp;
		int chansize, numchan;

		/* Get VRAM information */
		tmp = RREG32(mmMC_ARB_RAMCFG);
		if (REG_GET_FIELD(tmp, MC_ARB_RAMCFG, CHANSIZE))
			chansize = 64;
		else
			chansize = 32;

		tmp = RREG32(mmMC_SHARED_CHMAP);
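		/* NOOFCHAN is an encoded value, not a raw channel count;
		 * e.g. an encoding of 4 means 3 channels
		 */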
		switch (REG_GET_FIELD(tmp, MC_SHARED_CHMAP, NOOFCHAN)) {
		case 0:
		default:
			numchan = 1;
			break;
		case 1:
			numchan = 2;
			break;
		case 2:
			numchan = 4;
			break;
		case 3:
			numchan = 8;
			break;
		case 4:
			numchan = 3;
			break;
		case 5:
			numchan = 6;
			break;
		case 6:
			numchan = 10;
			break;
		case 7:
			numchan = 12;
			break;
		case 8:
			numchan = 16;
			break;
		}
		adev->gmc.vram_width = numchan * chansize;
	}
	/* CONFIG_MEMSIZE reports the VRAM size in MB */
	adev->gmc.mc_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;
	adev->gmc.real_vram_size = RREG32(mmCONFIG_MEMSIZE) * 1024ULL * 1024ULL;

	if (!(adev->flags & AMD_IS_APU)) {
		r = amdgpu_device_resize_fb_bar(adev);
		if (r)
			return r;
	}
	adev->gmc.aper_base = pci_resource_start(adev->pdev, 0);
	adev->gmc.aper_size = pci_resource_len(adev->pdev, 0);

#ifdef CONFIG_X86_64
	if ((adev->flags & AMD_IS_APU) &&
	    adev->gmc.real_vram_size > adev->gmc.aper_size &&
	    !amdgpu_passthrough(adev)) {
		adev->gmc.aper_base = ((u64)RREG32(mmMC_VM_FB_OFFSET)) << 22;
		adev->gmc.aper_size = adev->gmc.real_vram_size;
	}
#endif

	adev->gmc.visible_vram_size = adev->gmc.aper_size;

	/* set the gart size */
	if (amdgpu_gart_size == -1) {
		switch (adev->asic_type) {
		case CHIP_TOPAZ: /* no MM engines */
		default:
			adev->gmc.gart_size = 256ULL << 20;
			break;
#ifdef CONFIG_DRM_AMDGPU_CIK
		case CHIP_BONAIRE: /* UVD, VCE do not support GPUVM */
		case CHIP_HAWAII: /* UVD, VCE do not support GPUVM */
		case CHIP_KAVERI: /* UVD, VCE do not support GPUVM */
		case CHIP_KABINI: /* UVD, VCE do not support GPUVM */
		case CHIP_MULLINS: /* UVD, VCE do not support GPUVM */
			adev->gmc.gart_size = 1024ULL << 20;
			break;
#endif
		}
	} else {
		adev->gmc.gart_size = (u64)amdgpu_gart_size << 20;
	}

	adev->gmc.gart_size += adev->pm.smu_prv_buffer_size;
	gmc_v7_0_vram_gtt_location(adev, &adev->gmc);

	return 0;
}

/**
 * gmc_v7_0_flush_gpu_tlb_pasid - tlb flush via pasid
 *
 * @adev: amdgpu_device pointer
 * @pasid: pasid to be flushed
 * @flush_type: type of flush
 * @all_hub: flush all hubs
 * @inst: is used to select which instance of KIQ to use for the invalidation
 *
 * Flush the TLB for the requested pasid.
 */
static void gmc_v7_0_flush_gpu_tlb_pasid(struct amdgpu_device *adev,
					 uint16_t pasid, uint32_t flush_type,
					 bool all_hub, uint32_t inst)
{
	u32 mask = 0x0;
	int vmid;

	for (vmid = 1; vmid < 16; vmid++) {
		u32 tmp = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);

		if ((tmp & ATC_VMID0_PASID_MAPPING__VALID_MASK) &&
		    (tmp & ATC_VMID0_PASID_MAPPING__PASID_MASK) == pasid)
			mask |= 1 << vmid;
	}

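	/* each set bit in the mask requests an invalidate of the
	 * matching VM context; the read back drains the request
	 */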
	WREG32(mmVM_INVALIDATE_REQUEST, mask);
	RREG32(mmVM_INVALIDATE_RESPONSE);
}

/*
 * GART
 * VMID 0 is the physical GPU addresses as used by the kernel.
 * VMIDs 1-15 are used for userspace clients and are handled
 * by the amdgpu vm/hsa code.
 */

/**
 * gmc_v7_0_flush_gpu_tlb - gart tlb flush callback
 *
 * @adev: amdgpu_device pointer
 * @vmid: vm instance to flush
 * @vmhub: which hub to flush
 * @flush_type: type of flush
 *
 * Flush the TLB for the requested page table (CIK).
 */
static void gmc_v7_0_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid,
				   uint32_t vmhub, uint32_t flush_type)
{
	/* bits 0-15 are the VM contexts0-15 */
	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}

static uint64_t gmc_v7_0_emit_flush_gpu_tlb(struct amdgpu_ring *ring,
					    unsigned int vmid, uint64_t pd_addr)
{
	uint32_t reg;

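	/* the page-table base registers for contexts 0-7 and 8-15
	 * live in two separate, contiguous register banks
	 */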
	if (vmid < 8)
		reg = mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + vmid;
	else
		reg = mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8;
	amdgpu_ring_emit_wreg(ring, reg, pd_addr >> 12);

	/* bits 0-15 are the VM contexts0-15 */
	amdgpu_ring_emit_wreg(ring, mmVM_INVALIDATE_REQUEST, 1 << vmid);

	return pd_addr;
}

static void gmc_v7_0_emit_pasid_mapping(struct amdgpu_ring *ring, unsigned int vmid,
					unsigned int pasid)
{
	amdgpu_ring_emit_wreg(ring, mmIH_VMID_0_LUT + vmid, pasid);
}

static void gmc_v7_0_get_vm_pde(struct amdgpu_device *adev, int level,
				uint64_t *addr, uint64_t *flags)
{
	BUG_ON(*addr & 0xFFFFFF0000000FFFULL);
}

static void gmc_v7_0_get_vm_pte(struct amdgpu_device *adev,
				struct amdgpu_bo_va_mapping *mapping,
				uint64_t *flags)
{
	*flags &= ~AMDGPU_PTE_EXECUTABLE;
	*flags &= ~AMDGPU_PTE_PRT;
}

/**
 * gmc_v7_0_set_fault_enable_default - update VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void gmc_v7_0_set_fault_enable_default(struct amdgpu_device *adev,
					      bool value)
{
	u32 tmp;

	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
}

/**
 * gmc_v7_0_set_prt - set PRT VM fault
 *
 * @adev: amdgpu_device pointer
 * @enable: enable/disable VM fault handling for PRT
 */
static void gmc_v7_0_set_prt(struct amdgpu_device *adev, bool enable)
{
	uint32_t tmp;

	if (enable && !adev->gmc.prt_warning) {
		dev_warn(adev->dev, "Disabling VM faults because of PRT request!\n");
		adev->gmc.prt_warning = true;
	}

	tmp = RREG32(mmVM_PRT_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    CB_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_READ_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    TC_DISABLE_WRITE_FAULT_ON_UNMAPPED_ACCESS, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L2_CACHE_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    L1_TLB_STORE_INVALID_ENTRIES, enable);
	tmp = REG_SET_FIELD(tmp, VM_PRT_CNTL,
			    MASK_PDE0_FAULT, enable);
	WREG32(mmVM_PRT_CNTL, tmp);

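	/* when enabled, point the PRT apertures at the usable VA
	 * range, expressed in GPU pages
	 */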
	if (enable) {
		uint32_t low = AMDGPU_VA_RESERVED_BOTTOM >>
			AMDGPU_GPU_PAGE_SHIFT;
		uint32_t high = adev->vm_manager.max_pfn -
			(AMDGPU_VA_RESERVED_TOP >> AMDGPU_GPU_PAGE_SHIFT);

		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, low);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, high);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, high);
	} else {
		WREG32(mmVM_PRT_APERTURE0_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE1_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE2_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE3_LOW_ADDR, 0xfffffff);
		WREG32(mmVM_PRT_APERTURE0_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE1_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE2_HIGH_ADDR, 0x0);
		WREG32(mmVM_PRT_APERTURE3_HIGH_ADDR, 0x0);
	}
}

/**
 * gmc_v7_0_gart_enable - gart enable
 *
 * @adev: amdgpu_device pointer
 *
 * This sets up the TLBs, programs the page tables for VMID0,
 * sets up the hw for VMIDs 1-15 which are allocated on
 * demand, and sets up the global locations for the LDS, GDS,
 * and GPUVM for FSA64 clients (CIK).
 * Returns 0 for success, errors for failure.
 */
static int gmc_v7_0_gart_enable(struct amdgpu_device *adev)
{
	uint64_t table_addr;
	u32 tmp, field;
	int i;

	if (adev->gart.bo == NULL) {
		dev_err(adev->dev, "No VRAM object for PCIE GART.\n");
		return -EINVAL;
	}
	amdgpu_gtt_mgr_recover(&adev->mman.gtt_mgr);
	table_addr = amdgpu_bo_gpu_offset(adev->gart.bo);

	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PTE_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	WREG32(mmVM_L2_CNTL, tmp);
	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32(mmVM_L2_CNTL2, tmp);

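	/* fragment_size is the log2 of the fragment size in 4 KB pages */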
	field = adev->vm_manager.fragment_size;
	tmp = RREG32(mmVM_L2_CNTL3);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_ASSOCIATIVITY, 1);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, BANK_SELECT, field);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL3, L2_CACHE_BIGK_FRAGMENT_SIZE, field);
	WREG32(mmVM_L2_CNTL3, tmp);
	/* setup context0 */
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_START_ADDR, adev->gmc.gart_start >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_END_ADDR, adev->gmc.gart_end >> 12);
	WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR, table_addr >> 12);
	WREG32(mmVM_CONTEXT0_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT0_CNTL2, 0);
	tmp = RREG32(mmVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT0_CNTL, RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
	WREG32(mmVM_CONTEXT0_CNTL, tmp);

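	/* these registers are only known by raw offset; the writes were
	 * presumably carried over from the older radeon driver
	 */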
	WREG32(0x575, 0);
	WREG32(0x576, 0);
	WREG32(0x577, 0);

	/* empty context1-15 */
	/* FIXME start with 4G, once using 2 level pt switch to full
	 * vm size space
	 */
	/* set vm size, must be a multiple of 4 */
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
	WREG32(mmVM_CONTEXT1_PAGE_TABLE_END_ADDR, adev->vm_manager.max_pfn - 1);
	for (i = 1; i < AMDGPU_NUM_VMID; i++) {
		if (i < 8)
			WREG32(mmVM_CONTEXT0_PAGE_TABLE_BASE_ADDR + i,
			       table_addr >> 12);
		else
			WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + i - 8,
			       table_addr >> 12);
	}

	/* enable context1-15 */
	WREG32(mmVM_CONTEXT1_PROTECTION_FAULT_DEFAULT_ADDR,
	       (u32)(adev->dummy_page_addr >> 12));
	WREG32(mmVM_CONTEXT1_CNTL2, 4);
	tmp = RREG32(mmVM_CONTEXT1_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH, 1);
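	/* the hw field counts page-table bits above the minimum of 9
	 * (i.e. 512 entries), hence the subtraction
	 */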
	tmp = REG_SET_FIELD(tmp, VM_CONTEXT1_CNTL, PAGE_TABLE_BLOCK_SIZE,
			    adev->vm_manager.block_size - 9);
	WREG32(mmVM_CONTEXT1_CNTL, tmp);
	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_ALWAYS)
		gmc_v7_0_set_fault_enable_default(adev, false);
	else
		gmc_v7_0_set_fault_enable_default(adev, true);

	if (adev->asic_type == CHIP_KAVERI) {
		tmp = RREG32(mmCHUB_CONTROL);
		tmp &= ~BYPASS_VM;
		WREG32(mmCHUB_CONTROL, tmp);
	}

	gmc_v7_0_flush_gpu_tlb(adev, 0, 0, 0);
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
		 (unsigned int)(adev->gmc.gart_size >> 20),
		 (unsigned long long)table_addr);
	return 0;
}

static int gmc_v7_0_gart_init(struct amdgpu_device *adev)
{
	int r;

	if (adev->gart.bo) {
		WARN(1, "R600 PCIE GART already initialized\n");
		return 0;
	}
	/* Initialize common gart structure */
	r = amdgpu_gart_init(adev);
	if (r)
		return r;
	adev->gart.table_size = adev->gart.num_gpu_pages * 8;
	adev->gart.gart_pte_flags = 0;
	return amdgpu_gart_table_vram_alloc(adev);
}

/**
 * gmc_v7_0_gart_disable - gart disable
 *
 * @adev: amdgpu_device pointer
 *
 * This disables all VM page tables (CIK).
 */
static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
{
	u32 tmp;

	/* Disable all tables */
	WREG32(mmVM_CONTEXT0_CNTL, 0);
	WREG32(mmVM_CONTEXT1_CNTL, 0);
	/* Setup TLB control */
	tmp = RREG32(mmMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_L1_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MC_VM_MX_L1_TLB_CNTL, ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32(mmMC_VM_MX_L1_TLB_CNTL, tmp);
	/* Setup L2 cache */
	tmp = RREG32(mmVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32(mmVM_L2_CNTL, tmp);
	WREG32(mmVM_L2_CNTL2, 0);
}

/**
 * gmc_v7_0_vm_decode_fault - print human readable fault info
 *
 * @adev: amdgpu_device pointer
 * @status: VM_CONTEXT1_PROTECTION_FAULT_STATUS register value
 * @addr: VM_CONTEXT1_PROTECTION_FAULT_ADDR register value
 * @mc_client: VM_CONTEXT1_PROTECTION_FAULT_MCCLIENT register value
 * @pasid: debug logging only - no functional use
 *
 * Print human readable fault information (CIK).
 */
static void gmc_v7_0_vm_decode_fault(struct amdgpu_device *adev, u32 status,
				     u32 addr, u32 mc_client, unsigned int pasid)
{
	u32 vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
	u32 protections = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					PROTECTIONS);
	char block[5] = { mc_client >> 24, (mc_client >> 16) & 0xff,
		(mc_client >> 8) & 0xff, mc_client & 0xff, 0 };
	u32 mc_id;

	mc_id = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_ID);

	dev_err(adev->dev, "VM fault (0x%02x, vmid %d, pasid %d) at page %u, %s from '%s' (0x%08x) (%d)\n",
		protections, vmid, pasid, addr,
		REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			      MEMORY_CLIENT_RW) ?
		"write" : "read", block, mc_client, mc_id);
}


static const u32 mc_cg_registers[] = {
	mmMC_HUB_MISC_HUB_CG,
	mmMC_HUB_MISC_SIP_CG,
	mmMC_HUB_MISC_VM_CG,
	mmMC_XPB_CLK_GAT,
	mmATC_MISC_CG,
	mmMC_CITF_MISC_WR_CG,
	mmMC_CITF_MISC_RD_CG,
	mmMC_CITF_MISC_VM_CG,
	mmVM_L2_CG,
};

static const u32 mc_cg_ls_en[] = {
	MC_HUB_MISC_HUB_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__MEM_LS_ENABLE_MASK,
	MC_HUB_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	MC_XPB_CLK_GAT__MEM_LS_ENABLE_MASK,
	ATC_MISC_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_WR_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_RD_CG__MEM_LS_ENABLE_MASK,
	MC_CITF_MISC_VM_CG__MEM_LS_ENABLE_MASK,
	VM_L2_CG__MEM_LS_ENABLE_MASK,
};

static const u32 mc_cg_en[] = {
	MC_HUB_MISC_HUB_CG__ENABLE_MASK,
	MC_HUB_MISC_SIP_CG__ENABLE_MASK,
	MC_HUB_MISC_VM_CG__ENABLE_MASK,
	MC_XPB_CLK_GAT__ENABLE_MASK,
	ATC_MISC_CG__ENABLE_MASK,
	MC_CITF_MISC_WR_CG__ENABLE_MASK,
	MC_CITF_MISC_RD_CG__ENABLE_MASK,
	MC_CITF_MISC_VM_CG__ENABLE_MASK,
	VM_L2_CG__ENABLE_MASK,
};

static void gmc_v7_0_enable_mc_ls(struct amdgpu_device *adev,
				  bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
			data |= mc_cg_ls_en[i];
		else
			data &= ~mc_cg_ls_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_mc_mgcg(struct amdgpu_device *adev,
				    bool enable)
{
	int i;
	u32 orig, data;

	for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
		orig = data = RREG32(mc_cg_registers[i]);
		if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
			data |= mc_cg_en[i];
		else
			data &= ~mc_cg_en[i];
		if (data != orig)
			WREG32(mc_cg_registers[i], data);
	}
}

static void gmc_v7_0_enable_bif_mgls(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32_PCIE(ixPCIE_CNTL2);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 1);
	} else {
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 0);
		data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_AGGRESSIVE_LS_EN, 0);
	}

	if (orig != data)
		WREG32_PCIE(ixPCIE_CNTL2, data);
}

static void gmc_v7_0_enable_hdp_mgcg(struct amdgpu_device *adev,
				     bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_HOST_PATH_CNTL);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
	else
		data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);

	if (orig != data)
		WREG32(mmHDP_HOST_PATH_CNTL, data);
}

static void gmc_v7_0_enable_hdp_ls(struct amdgpu_device *adev,
				   bool enable)
{
	u32 orig, data;

	orig = data = RREG32(mmHDP_MEM_POWER_LS);

	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
	else
		data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);

	if (orig != data)
		WREG32(mmHDP_MEM_POWER_LS, data);
}

static int gmc_v7_0_convert_vram_type(int mc_seq_vram_type)
{
	switch (mc_seq_vram_type) {
	case MC_SEQ_MISC0__MT__GDDR1:
		return AMDGPU_VRAM_TYPE_GDDR1;
	case MC_SEQ_MISC0__MT__DDR2:
		return AMDGPU_VRAM_TYPE_DDR2;
	case MC_SEQ_MISC0__MT__GDDR3:
		return AMDGPU_VRAM_TYPE_GDDR3;
	case MC_SEQ_MISC0__MT__GDDR4:
		return AMDGPU_VRAM_TYPE_GDDR4;
	case MC_SEQ_MISC0__MT__GDDR5:
		return AMDGPU_VRAM_TYPE_GDDR5;
	case MC_SEQ_MISC0__MT__HBM:
		return AMDGPU_VRAM_TYPE_HBM;
	case MC_SEQ_MISC0__MT__DDR3:
		return AMDGPU_VRAM_TYPE_DDR3;
	default:
		return AMDGPU_VRAM_TYPE_UNKNOWN;
	}
}

static int gmc_v7_0_early_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_set_gmc_funcs(adev);
	gmc_v7_0_set_irq_funcs(adev);

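	/* carve out 4 GB shared and 4 GB private apertures in the
	 * GPUVM address space; the same constants are programmed by
	 * the other amdgpu GMC blocks
	 */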
	adev->gmc.shared_aperture_start = 0x2000000000000000ULL;
	adev->gmc.shared_aperture_end =
		adev->gmc.shared_aperture_start + (4ULL << 30) - 1;
	adev->gmc.private_aperture_start =
		adev->gmc.shared_aperture_end + 1;
	adev->gmc.private_aperture_end =
		adev->gmc.private_aperture_start + (4ULL << 30) - 1;
	adev->gmc.noretry_flags = AMDGPU_VM_NORETRY_FLAGS_TF;

	return 0;
}

static int gmc_v7_0_late_init(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (amdgpu_vm_fault_stop != AMDGPU_VM_FAULT_STOP_ALWAYS)
		return amdgpu_irq_get(adev, &adev->gmc.vm_fault, 0);
	else
		return 0;
}

static unsigned int gmc_v7_0_get_vbios_fb_size(struct amdgpu_device *adev)
{
	u32 d1vga_control = RREG32(mmD1VGA_CONTROL);
	unsigned int size;

	if (REG_GET_FIELD(d1vga_control, D1VGA_CONTROL, D1VGA_MODE_ENABLE)) {
		size = AMDGPU_VBIOS_VGA_ALLOCATION;
	} else {
		u32 viewport = RREG32(mmVIEWPORT_SIZE);

		size = (REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_HEIGHT) *
			REG_GET_FIELD(viewport, VIEWPORT_SIZE, VIEWPORT_WIDTH) *
			4);
	}

	return size;
}

static int gmc_v7_0_sw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	set_bit(AMDGPU_GFXHUB(0), adev->vmhubs_mask);

	if (adev->flags & AMD_IS_APU) {
		adev->gmc.vram_type = AMDGPU_VRAM_TYPE_UNKNOWN;
	} else {
		u32 tmp = RREG32(mmMC_SEQ_MISC0);

		tmp &= MC_SEQ_MISC0__MT__MASK;
		adev->gmc.vram_type = gmc_v7_0_convert_vram_type(tmp);
	}

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_PAGE_INV_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_GFX_MEM_PROT_FAULT, &adev->gmc.vm_fault);
	if (r)
		return r;

	/* Adjust VM size here.
	 * Default is 64 GB of address space.
	 * Max GPUVM address space for CIK is 40 bits.
	 */
	amdgpu_vm_adjust_size(adev, 64, 9, 1, 40);

	/* Set the internal MC address mask
	 * This is the max address of the GPU's
	 * internal address space.
	 */
	adev->gmc.mc_mask = 0xffffffffffULL; /* 40 bit MC */

	r = dma_set_mask_and_coherent(adev->dev, DMA_BIT_MASK(40));
	if (r) {
		pr_warn("No suitable DMA available\n");
		return r;
	}
	adev->need_swiotlb = drm_need_swiotlb(40);

	r = gmc_v7_0_init_microcode(adev);
	if (r) {
		DRM_ERROR("Failed to load mc firmware!\n");
		return r;
	}

	r = gmc_v7_0_mc_init(adev);
	if (r)
		return r;

	amdgpu_gmc_get_vbios_allocations(adev);

	/* Memory manager */
	r = amdgpu_bo_init(adev);
	if (r)
		return r;

	r = gmc_v7_0_gart_init(adev);
	if (r)
		return r;

	/*
	 * number of VMs
	 * VMID 0 is reserved for System
	 * amdgpu graphics/compute will use VMIDs 1-7
	 * amdkfd will use VMIDs 8-15
	 */
	adev->vm_manager.first_kfd_vmid = 8;
	amdgpu_vm_manager_init(adev);

	/* base offset of vram pages */
	if (adev->flags & AMD_IS_APU) {
		u64 tmp = RREG32(mmMC_VM_FB_OFFSET);

		tmp <<= 22;
		adev->vm_manager.vram_base_offset = tmp;
	} else {
		adev->vm_manager.vram_base_offset = 0;
	}

	adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
					  GFP_KERNEL);
	if (!adev->gmc.vm_fault_info)
		return -ENOMEM;
	atomic_set(&adev->gmc.vm_fault_info_updated, 0);

	return 0;
}

static int gmc_v7_0_sw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_gem_force_release(adev);
	amdgpu_vm_manager_fini(adev);
	kfree(adev->gmc.vm_fault_info);
	amdgpu_gart_table_vram_free(adev);
	amdgpu_bo_fini(adev);
	amdgpu_ucode_release(&adev->gmc.fw);

	return 0;
}

static int gmc_v7_0_hw_init(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_init_golden_registers(adev);

	gmc_v7_0_mc_program(adev);

	if (!(adev->flags & AMD_IS_APU)) {
		r = gmc_v7_0_mc_load_microcode(adev);
		if (r) {
			DRM_ERROR("Failed to load MC firmware!\n");
			return r;
		}
	}

	r = gmc_v7_0_gart_enable(adev);
	if (r)
		return r;

	if (amdgpu_emu_mode == 1)
		return amdgpu_gmc_vram_checking(adev);

	return 0;
}

static int gmc_v7_0_hw_fini(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	amdgpu_irq_put(adev, &adev->gmc.vm_fault, 0);
	gmc_v7_0_gart_disable(adev);

	return 0;
}

static int gmc_v7_0_suspend(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	gmc_v7_0_hw_fini(adev);

	return 0;
}

static int gmc_v7_0_resume(void *handle)
{
	int r;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	r = gmc_v7_0_hw_init(adev);
	if (r)
		return r;

	amdgpu_vmid_reset_all(adev);

	return 0;
}

static bool gmc_v7_0_is_idle(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK | SRBM_STATUS__VMC_BUSY_MASK))
		return false;

	return true;
}

static int gmc_v7_0_wait_for_idle(void *handle)
{
	unsigned int i;
	u32 tmp;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	for (i = 0; i < adev->usec_timeout; i++) {
		/* read SRBM_STATUS and check the MC and VMC busy bits */
		tmp = RREG32(mmSRBM_STATUS) & (SRBM_STATUS__MCB_BUSY_MASK |
					       SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
					       SRBM_STATUS__MCC_BUSY_MASK |
					       SRBM_STATUS__MCD_BUSY_MASK |
					       SRBM_STATUS__VMC_BUSY_MASK);
		if (!tmp)
			return 0;
		udelay(1);
	}
	return -ETIMEDOUT;
}

static int gmc_v7_0_soft_reset(void *handle)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
	u32 srbm_soft_reset = 0;
	u32 tmp = RREG32(mmSRBM_STATUS);

	if (tmp & SRBM_STATUS__VMC_BUSY_MASK)
		srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
						SRBM_SOFT_RESET, SOFT_RESET_VMC, 1);

	if (tmp & (SRBM_STATUS__MCB_BUSY_MASK | SRBM_STATUS__MCB_NON_DISPLAY_BUSY_MASK |
		   SRBM_STATUS__MCC_BUSY_MASK | SRBM_STATUS__MCD_BUSY_MASK)) {
		if (!(adev->flags & AMD_IS_APU))
			srbm_soft_reset = REG_SET_FIELD(srbm_soft_reset,
							SRBM_SOFT_RESET, SOFT_RESET_MC, 1);
	}

	if (srbm_soft_reset) {
		gmc_v7_0_mc_stop(adev);
		if (gmc_v7_0_wait_for_idle((void *)adev))
			dev_warn(adev->dev, "Wait for GMC idle timed out!\n");

		tmp = RREG32(mmSRBM_SOFT_RESET);
		tmp |= srbm_soft_reset;
		dev_info(adev->dev, "SRBM_SOFT_RESET=0x%08X\n", tmp);
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		udelay(50);

		tmp &= ~srbm_soft_reset;
		WREG32(mmSRBM_SOFT_RESET, tmp);
		tmp = RREG32(mmSRBM_SOFT_RESET);

		/* Wait a little for things to settle down */
		udelay(50);

		gmc_v7_0_mc_resume(adev);
		udelay(50);
	}

	return 0;
}

static int gmc_v7_0_vm_fault_interrupt_state(struct amdgpu_device *adev,
					     struct amdgpu_irq_src *src,
					     unsigned int type,
					     enum amdgpu_interrupt_state state)
{
	u32 tmp;
	u32 bits = (VM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		    VM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK);

	switch (state) {
	case AMDGPU_IRQ_STATE_DISABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp &= ~bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		/* system context */
		tmp = RREG32(mmVM_CONTEXT0_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT0_CNTL, tmp);
		/* VMs */
		tmp = RREG32(mmVM_CONTEXT1_CNTL);
		tmp |= bits;
		WREG32(mmVM_CONTEXT1_CNTL, tmp);
		break;
	default:
		break;
	}

	return 0;
}

static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
				      struct amdgpu_irq_src *source,
				      struct amdgpu_iv_entry *entry)
{
	u32 addr, status, mc_client, vmid;

	addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
	status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
	mc_client = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_MCCLIENT);
	/* reset addr and status */
	WREG32_P(mmVM_CONTEXT1_CNTL2, 1, ~1);

	if (!addr && !status)
		return 0;

	amdgpu_vm_update_fault_cache(adev, entry->pasid,
				     ((u64)addr) << AMDGPU_GPU_PAGE_SHIFT, status, AMDGPU_GFXHUB(0));

	if (amdgpu_vm_fault_stop == AMDGPU_VM_FAULT_STOP_FIRST)
		gmc_v7_0_set_fault_enable_default(adev, false);

	if (printk_ratelimit()) {
		dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
			entry->src_id, entry->src_data[0]);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_ADDR 0x%08X\n",
			addr);
		dev_err(adev->dev, " VM_CONTEXT1_PROTECTION_FAULT_STATUS 0x%08X\n",
			status);
		gmc_v7_0_vm_decode_fault(adev, status, addr, mc_client,
					 entry->pasid);
	}

	vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
			     VMID);
	if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
	    && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
		struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
		u32 protections = REG_GET_FIELD(status,
						VM_CONTEXT1_PROTECTION_FAULT_STATUS,
						PROTECTIONS);

		info->vmid = vmid;
		info->mc_id = REG_GET_FIELD(status,
					    VM_CONTEXT1_PROTECTION_FAULT_STATUS,
					    MEMORY_CLIENT_ID);
		info->status = status;
		info->page_addr = addr;
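		/* decode the PROTECTIONS field: bits 0-2 are the
		 * protection level, bit 3 read, bit 4 write, bit 5 execute
		 */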
		info->prot_valid = protections & 0x7 ? true : false;
		info->prot_read = protections & 0x8 ? true : false;
		info->prot_write = protections & 0x10 ? true : false;
		info->prot_exec = protections & 0x20 ? true : false;
		mb();
		atomic_set(&adev->gmc.vm_fault_info_updated, 1);
	}

	return 0;
}

static int gmc_v7_0_set_clockgating_state(void *handle,
					  enum amd_clockgating_state state)
{
	bool gate = false;
	struct amdgpu_device *adev = (struct amdgpu_device *)handle;

	if (state == AMD_CG_STATE_GATE)
		gate = true;

	if (!(adev->flags & AMD_IS_APU)) {
		gmc_v7_0_enable_mc_mgcg(adev, gate);
		gmc_v7_0_enable_mc_ls(adev, gate);
	}
	gmc_v7_0_enable_bif_mgls(adev, gate);
	gmc_v7_0_enable_hdp_mgcg(adev, gate);
	gmc_v7_0_enable_hdp_ls(adev, gate);

	return 0;
}

static int gmc_v7_0_set_powergating_state(void *handle,
					  enum amd_powergating_state state)
{
	return 0;
}

static const struct amd_ip_funcs gmc_v7_0_ip_funcs = {
	.name = "gmc_v7_0",
	.early_init = gmc_v7_0_early_init,
	.late_init = gmc_v7_0_late_init,
	.sw_init = gmc_v7_0_sw_init,
	.sw_fini = gmc_v7_0_sw_fini,
	.hw_init = gmc_v7_0_hw_init,
	.hw_fini = gmc_v7_0_hw_fini,
	.suspend = gmc_v7_0_suspend,
	.resume = gmc_v7_0_resume,
	.is_idle = gmc_v7_0_is_idle,
	.wait_for_idle = gmc_v7_0_wait_for_idle,
	.soft_reset = gmc_v7_0_soft_reset,
	.set_clockgating_state = gmc_v7_0_set_clockgating_state,
	.set_powergating_state = gmc_v7_0_set_powergating_state,
};

static const struct amdgpu_gmc_funcs gmc_v7_0_gmc_funcs = {
	.flush_gpu_tlb = gmc_v7_0_flush_gpu_tlb,
	.flush_gpu_tlb_pasid = gmc_v7_0_flush_gpu_tlb_pasid,
	.emit_flush_gpu_tlb = gmc_v7_0_emit_flush_gpu_tlb,
	.emit_pasid_mapping = gmc_v7_0_emit_pasid_mapping,
	.set_prt = gmc_v7_0_set_prt,
	.get_vm_pde = gmc_v7_0_get_vm_pde,
	.get_vm_pte = gmc_v7_0_get_vm_pte,
	.get_vbios_fb_size = gmc_v7_0_get_vbios_fb_size,
};

static const struct amdgpu_irq_src_funcs gmc_v7_0_irq_funcs = {
	.set = gmc_v7_0_vm_fault_interrupt_state,
	.process = gmc_v7_0_process_interrupt,
};

static void gmc_v7_0_set_gmc_funcs(struct amdgpu_device *adev)
{
	adev->gmc.gmc_funcs = &gmc_v7_0_gmc_funcs;
}

static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->gmc.vm_fault.num_types = 1;
	adev->gmc.vm_fault.funcs = &gmc_v7_0_irq_funcs;
}

const struct amdgpu_ip_block_version gmc_v7_0_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 0,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};

const struct amdgpu_ip_block_version gmc_v7_4_ip_block = {
	.type = AMD_IP_BLOCK_TYPE_GMC,
	.major = 7,
	.minor = 4,
	.rev = 0,
	.funcs = &gmc_v7_0_ip_funcs,
};