/*
 * Copyright 2023 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "mmhub_v4_1_0.h"

#include "mmhub/mmhub_4_1_0_offset.h"
#include "mmhub/mmhub_4_1_0_sh_mask.h"

#include "soc15_common.h"
#include "soc24_enum.h"

#define regMMVM_L2_CNTL3_DEFAULT	0x80100007
#define regMMVM_L2_CNTL4_DEFAULT	0x000000c1
#define regMMVM_L2_CNTL5_DEFAULT	0x00003fe0

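/*
 * MMHUB client ID table, indexed as [client id][rw]: column 0 names the
 * read clients, column 1 the write clients.  Used to decode the CID field
 * of MMVM_L2_PROTECTION_FAULT_STATUS_LO32 when reporting VM faults.
 */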
static const char *mmhub_client_ids_v4_1_0[][2] = {
	[0][0] = "VMC",
	[4][0] = "DCEDMC",
	[5][0] = "DCEVGA",
	[6][0] = "MP0",
	[7][0] = "MP1",
	[8][0] = "MPIO",
	[16][0] = "HDP",
	[17][0] = "LSDMA",
	[18][0] = "JPEG",
	[19][0] = "VCNU0",
	[21][0] = "VSCH",
	[22][0] = "VCNU1",
	[23][0] = "VCN1",
	[32+20][0] = "VCN0",
	[2][1] = "DBGUNBIO",
	[3][1] = "DCEDWB",
	[4][1] = "DCEDMC",
	[5][1] = "DCEVGA",
	[6][1] = "MP0",
	[7][1] = "MP1",
	[8][1] = "MPIO",
	[10][1] = "DBGU0",
	[11][1] = "DBGU1",
	[12][1] = "DBGU2",
	[13][1] = "DBGU3",
	[14][1] = "XDP",
	[15][1] = "OSSSYS",
	[16][1] = "HDP",
	[17][1] = "LSDMA",
	[18][1] = "JPEG",
	[19][1] = "VCNU0",
	[20][1] = "VCN0",
	[21][1] = "VSCH",
	[22][1] = "VCNU1",
	[23][1] = "VCN1",
};

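/**
 * mmhub_v4_1_0_get_invalidate_req - build a VM invalidation request
 *
 * @vmid: VMID whose translations should be invalidated
 * @flush_type: requested flush type (unused; MMHUB only uses legacy flush)
 *
 * Returns the MMVM_INVALIDATE_ENG0_REQ value which invalidates the L1 and
 * L2 PTE/PDE cache entries of @vmid.
 */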
static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
						uint32_t flush_type)
{
	u32 req = 0;

	/* invalidate using legacy mode on vmid */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
	/* Only use legacy invalidation on the MMHUB side */
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE2, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L1_PTES, 1);
	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
			    CLEAR_PROTECTION_FAULT_STATUS_ADDR, 0);

	return req;
}

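/*
 * Decode and log a MMVM_L2_PROTECTION_FAULT_STATUS_LO32 value: the
 * offending client (looked up in mmhub_client_ids_v4_1_0), whether more
 * faults are pending, walker/permission/mapping errors and the
 * read/write direction.
 */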
static void
mmhub_v4_1_0_print_l2_protection_fault_status(struct amdgpu_device *adev,
					      uint32_t status)
{
	uint32_t cid, rw;
	const char *mmhub_cid = NULL;

	cid = REG_GET_FIELD(status,
			    MMVM_L2_PROTECTION_FAULT_STATUS_LO32, CID);
	rw = REG_GET_FIELD(status,
			   MMVM_L2_PROTECTION_FAULT_STATUS_LO32, RW);

	dev_err(adev->dev,
		"MMVM_L2_PROTECTION_FAULT_STATUS_LO32:0x%08X\n",
		status);
	switch (amdgpu_ip_version(adev, MMHUB_HWIP, 0)) {
	case IP_VERSION(4, 1, 0):
		mmhub_cid = mmhub_client_ids_v4_1_0[cid][rw];
		break;
	default:
		mmhub_cid = NULL;
		break;
	}
	dev_err(adev->dev, "\t Faulty UTCL2 client ID: %s (0x%x)\n",
		mmhub_cid ? mmhub_cid : "unknown", cid);
	dev_err(adev->dev, "\t MORE_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MORE_FAULTS));
	dev_err(adev->dev, "\t WALKER_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, WALKER_ERROR));
	dev_err(adev->dev, "\t PERMISSION_FAULTS: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, PERMISSION_FAULTS));
	dev_err(adev->dev, "\t MAPPING_ERROR: 0x%lx\n",
		REG_GET_FIELD(status,
		MMVM_L2_PROTECTION_FAULT_STATUS_LO32, MAPPING_ERROR));
	dev_err(adev->dev, "\t RW: 0x%x\n", rw);
}

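/**
 * mmhub_v4_1_0_setup_vm_pt_regs - program the page table base of a VMID
 *
 * @adev: amdgpu_device pointer
 * @vmid: VM context whose page table base is updated
 * @page_table_base: GPU address of the top level page directory
 */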
static void mmhub_v4_1_0_setup_vm_pt_regs(struct amdgpu_device *adev,
					  uint32_t vmid, uint64_t page_table_base)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32,
			    hub->ctx_addr_distance * vmid,
			    lower_32_bits(page_table_base));

	WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32,
			    hub->ctx_addr_distance * vmid,
			    upper_32_bits(page_table_base));
}

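/* Program the context 0 (GART) page table base and aperture start/end. */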
static void mmhub_v4_1_0_init_gart_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t pt_base = amdgpu_gmc_pd_addr(adev->gart.bo);

	mmhub_v4_1_0_setup_vm_pt_regs(adev, 0, pt_base);

	WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_LO32,
		     (u32)(adev->gmc.gart_start >> 12));
	WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_START_ADDR_HI32,
		     (u32)(adev->gmc.gart_start >> 44));

	WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_LO32,
		     (u32)(adev->gmc.gart_end >> 12));
	WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_PAGE_TABLE_END_ADDR_HI32,
		     (u32)(adev->gmc.gart_end >> 44));
}

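/*
 * Program the AGP aperture, the system aperture bounds, the default page
 * address and the protection fault default address.  Skipped on SRIOV
 * VFs, where the host programs these registers.
 */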
static void mmhub_v4_1_0_init_system_aperture_regs(struct amdgpu_device *adev)
{
	uint64_t value;
	uint32_t tmp;

	/*
	 * The new L1 policy blocks the SRIOV guest from writing these
	 * registers; they are programmed by the host instead, so skip
	 * programming them here.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Program the AGP BAR */
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BASE, 0);
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_BOT, adev->gmc.agp_start >> 24);
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_AGP_TOP, adev->gmc.agp_end >> 24);

	/* Program the system aperture low logical page number. */
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_LOW_ADDR,
		     min(adev->gmc.fb_start, adev->gmc.agp_start) >> 18);
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_HIGH_ADDR,
		     max(adev->gmc.fb_end, adev->gmc.agp_end) >> 18);

	/* Set default page address. */
	value = adev->mem_scratch.gpu_addr - adev->gmc.vram_start +
		adev->vm_manager.vram_base_offset;
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_LSB,
		     (u32)(value >> 12));
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_SYSTEM_APERTURE_DEFAULT_ADDR_MSB,
		     (u32)(value >> 44));

	/* Program "protection fault". */
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_LO32,
		     (u32)(adev->dummy_page_addr >> 12));
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_DEFAULT_ADDR_HI32,
		     (u32)((u64)adev->dummy_page_addr >> 44));

	tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL2,
			    ACTIVE_PAGE_MIGRATION_PTE_READ_RETRY, 1);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL2, tmp);
}

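/*
 * Set up the MC L1 TLB control: enable the TLB and the advanced driver
 * model, and use uncached (UC) MTYPE for system aperture accesses.
 */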
static void mmhub_v4_1_0_init_tlb_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL);

	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, SYSTEM_ACCESS_MODE, 3);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 1);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    SYSTEM_APERTURE_UNMAPPED_ACCESS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ECO_BITS, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    MTYPE, MTYPE_UC); /* UC, uncached */

	WREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL, tmp);
}

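/*
 * Set up the VM L2 cache: enable it, invalidate all L1 TLBs and the L2
 * cache, and program the fragment sizes (larger fragments when
 * translate_further is in use).  Skipped on SRIOV VFs.
 */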
static void mmhub_v4_1_0_init_cache_regs(struct amdgpu_device *adev)
{
	uint32_t tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_FRAGMENT_PROCESSING, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL,
			    ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
	/* XXX for emulation, refer to closed source code. */
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, L2_PDE0_CACHE_TAG_GENERATION_MODE,
			    0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, PDE_FAULT_CLASSIFICATION, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, IDENTITY_MODE_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL, tmp);

	tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL2);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL2, tmp);

	tmp = regMMVM_L2_CNTL3_DEFAULT;
	if (adev->gmc.translate_further) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 12);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 9);
	} else {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3, BANK_SELECT, 9);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL3,
				    L2_CACHE_BIGK_FRAGMENT_SIZE, 6);
	}
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL3, tmp);

	tmp = regMMVM_L2_CNTL4_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PDE_REQUEST_PHYSICAL, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL4, VMC_TAP_PTE_REQUEST_PHYSICAL, 0);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL4, tmp);

	tmp = regMMVM_L2_CNTL5_DEFAULT;
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL5, L2_CACHE_SMALLK_FRAGMENT_SIZE, 0);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL5, tmp);
}

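/*
 * Enable VM context 0, the system (GART) domain: page table depth 0 and
 * no retry on permission or invalid page faults.
 */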
static void mmhub_v4_1_0_enable_system_domain(struct amdgpu_device *adev)
{
	uint32_t tmp;

	tmp = RREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, ENABLE_CONTEXT, 1);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL, PAGE_TABLE_DEPTH, 0);
	tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT0_CNTL,
			    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT, 0);
	WREG32_SOC15(MMHUB, 0, regMMVM_CONTEXT0_CNTL, tmp);
}

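/*
 * Disable the context 1 identity aperture by programming an empty range
 * (low address above high address).  Skipped on SRIOV VFs.
 */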
static void mmhub_v4_1_0_disable_identity_aperture(struct amdgpu_device *adev)
{
	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	WREG32_SOC15(MMHUB, 0,
		     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_LO32,
		     0xFFFFFFFF);
	WREG32_SOC15(MMHUB, 0,
		     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_LOW_ADDR_HI32,
		     0x0000000F);

	WREG32_SOC15(MMHUB, 0,
		     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_LO32, 0);
	WREG32_SOC15(MMHUB, 0,
		     regMMVM_L2_CONTEXT1_IDENTITY_APERTURE_HIGH_ADDR_HI32, 0);

	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_LO32,
		     0);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CONTEXT_IDENTITY_PHYSICAL_OFFSET_HI32,
		     0);
}

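/*
 * Configure the user VM contexts (1-15): page table depth, block size,
 * fault behaviour and the full address range up to max_pfn.
 */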
static void mmhub_v4_1_0_setup_vmid_config(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	int i;
	uint32_t tmp;

	for (i = 0; i <= 14; i++) {
		tmp = RREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL, i);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, ENABLE_CONTEXT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL, PAGE_TABLE_DEPTH,
				    adev->vm_manager.num_level);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT,
				    1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    READ_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    PAGE_TABLE_BLOCK_SIZE,
				    adev->vm_manager.block_size - 9);
		/* Send no-retry XNACK on fault to suppress VM fault storm. */
		tmp = REG_SET_FIELD(tmp, MMVM_CONTEXT1_CNTL,
				    RETRY_PERMISSION_OR_INVALID_PAGE_FAULT,
				    !amdgpu_noretry);
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_CNTL,
				    i * hub->ctx_distance, tmp);
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_LO32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_START_ADDR_HI32,
				    i * hub->ctx_addr_distance, 0);
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_LO32,
				    i * hub->ctx_addr_distance,
				    lower_32_bits(adev->vm_manager.max_pfn - 1));
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT1_PAGE_TABLE_END_ADDR_HI32,
				    i * hub->ctx_addr_distance,
				    upper_32_bits(adev->vm_manager.max_pfn - 1));
	}

	hub->vm_cntx_cntl = tmp;
}

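/* Open up all 18 invalidation engines to the full address range. */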
static void mmhub_v4_1_0_program_invalidation(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	unsigned i;

	for (i = 0; i < 18; ++i) {
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32,
				    i * hub->eng_addr_distance, 0xffffffff);
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_ADDR_RANGE_HI32,
				    i * hub->eng_addr_distance, 0x1f);
	}
}

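/*
 * Bring up GART/VM address translation: program the apertures, TLB and
 * L2 cache, then enable the VM contexts and invalidation engines.
 */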
static int mmhub_v4_1_0_gart_enable(struct amdgpu_device *adev)
{
	/* GART Enable. */
	mmhub_v4_1_0_init_gart_aperture_regs(adev);
	mmhub_v4_1_0_init_system_aperture_regs(adev);
	mmhub_v4_1_0_init_tlb_regs(adev);
	mmhub_v4_1_0_init_cache_regs(adev);

	mmhub_v4_1_0_enable_system_domain(adev);
	mmhub_v4_1_0_disable_identity_aperture(adev);
	mmhub_v4_1_0_setup_vmid_config(adev);
	mmhub_v4_1_0_program_invalidation(adev);

	return 0;
}

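/* Tear down GART: disable all 16 VM contexts, the L1 TLB and the L2 cache. */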
static void mmhub_v4_1_0_gart_disable(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];
	u32 tmp;
	u32 i;

	/* Disable all tables */
	for (i = 0; i < 16; i++)
		WREG32_SOC15_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_CNTL,
				    i * hub->ctx_distance, 0);

	/* Setup TLB control */
	tmp = RREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL, ENABLE_L1_TLB, 0);
	tmp = REG_SET_FIELD(tmp, MMMC_VM_MX_L1_TLB_CNTL,
			    ENABLE_ADVANCED_DRIVER_MODEL, 0);
	WREG32_SOC15(MMHUB, 0, regMMMC_VM_MX_L1_TLB_CNTL, tmp);

	/* Setup L2 cache */
	tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_CNTL, ENABLE_L2_CACHE, 0);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL, tmp);
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_CNTL3, 0);
}

/**
 * mmhub_v4_1_0_set_fault_enable_default - update GART/VM fault handling
 *
 * @adev: amdgpu_device pointer
 * @value: true redirects VM faults to the default page
 */
static void
mmhub_v4_1_0_set_fault_enable_default(struct amdgpu_device *adev, bool value)
{
	u32 tmp;

	/* These registers are not accessible to VF-SRIOV.
	 * The PF will program them instead.
	 */
	if (amdgpu_sriov_vf(adev))
		return;

	tmp = RREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    RANGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE0_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE1_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    PDE2_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    TRANSLATE_FURTHER_PROTECTION_FAULT_ENABLE_DEFAULT,
			    value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    NACK_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    DUMMY_PAGE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    VALID_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    READ_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    WRITE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
			    EXECUTE_PROTECTION_FAULT_ENABLE_DEFAULT, value);
	if (!value) {
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_NO_RETRY_FAULT, 1);
		tmp = REG_SET_FIELD(tmp, MMVM_L2_PROTECTION_FAULT_CNTL,
				    CRASH_ON_RETRY_FAULT, 1);
	}
	WREG32_SOC15(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL, tmp);
}

static const struct amdgpu_vmhub_funcs mmhub_v4_1_0_vmhub_funcs = {
	.print_l2_protection_fault_status = mmhub_v4_1_0_print_l2_protection_fault_status,
	.get_invalidate_req = mmhub_v4_1_0_get_invalidate_req,
};

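/*
 * Fill in the amdgpu_vmhub description for MMHUB 4.1.0: register offsets,
 * per-context and per-engine register strides, and the fault interrupt
 * mask.
 */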
static void mmhub_v4_1_0_init(struct amdgpu_device *adev)
{
	struct amdgpu_vmhub *hub = &adev->vmhub[AMDGPU_MMHUB0(0)];

	hub->ctx0_ptb_addr_lo32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32);
	hub->ctx0_ptb_addr_hi32 =
		SOC15_REG_OFFSET(MMHUB, 0,
				 regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_HI32);
	hub->vm_inv_eng0_sem =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_SEM);
	hub->vm_inv_eng0_req =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_REQ);
	hub->vm_inv_eng0_ack =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_INVALIDATE_ENG0_ACK);
	hub->vm_context0_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXT0_CNTL);
	hub->vm_l2_pro_fault_status =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_STATUS_LO32);
	hub->vm_l2_pro_fault_cntl =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_PROTECTION_FAULT_CNTL);

	hub->ctx_distance = regMMVM_CONTEXT1_CNTL - regMMVM_CONTEXT0_CNTL;
	hub->ctx_addr_distance = regMMVM_CONTEXT1_PAGE_TABLE_BASE_ADDR_LO32 -
		regMMVM_CONTEXT0_PAGE_TABLE_BASE_ADDR_LO32;
	hub->eng_distance = regMMVM_INVALIDATE_ENG1_REQ -
		regMMVM_INVALIDATE_ENG0_REQ;
	hub->eng_addr_distance = regMMVM_INVALIDATE_ENG1_ADDR_RANGE_LO32 -
		regMMVM_INVALIDATE_ENG0_ADDR_RANGE_LO32;

	hub->vm_cntx_cntl_vm_fault = MMVM_CONTEXT1_CNTL__RANGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__DUMMY_PAGE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__PDE0_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__VALID_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__READ_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__WRITE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK |
		MMVM_CONTEXT1_CNTL__EXECUTE_PROTECTION_FAULT_ENABLE_INTERRUPT_MASK;

	hub->vm_l2_bank_select_reserved_cid2 =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_L2_BANK_SELECT_RESERVED_CID2);

	hub->vm_contexts_disable =
		SOC15_REG_OFFSET(MMHUB, 0, regMMVM_CONTEXTS_DISABLE);

	hub->vmhub_funcs = &mmhub_v4_1_0_vmhub_funcs;
}

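/*
 * MMMC_VM_FB_LOCATION_BASE and MMMC_VM_FB_OFFSET hold the framebuffer
 * address in 16MB units, hence the shift left by 24 to get a byte
 * address.
 */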
static u64 mmhub_v4_1_0_get_fb_location(struct amdgpu_device *adev)
{
	u64 base;

	base = RREG32_SOC15(MMHUB, 0, regMMMC_VM_FB_LOCATION_BASE);

	base &= MMMC_VM_FB_LOCATION_BASE__FB_BASE_MASK;
	base <<= 24;

	return base;
}

static u64 mmhub_v4_1_0_get_mc_fb_offset(struct amdgpu_device *adev)
{
	return (u64)RREG32_SOC15(MMHUB, 0, regMMMC_VM_FB_OFFSET) << 24;
}

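/*
 * Medium grain clock gating: toggle fine grain clock gating (FGCG) on
 * the DAGB0/DAGB1 read/write return tap chains.  The MM_ATC_L2_MISC_CG
 * path is currently stubbed out with "#if 0".
 */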
static void
mmhub_v4_1_0_update_medium_grain_clock_gating(struct amdgpu_device *adev,
					      bool enable)
{
#if 0
	uint32_t def, data;
#endif
	uint32_t def1, data1, def2 = 0, data2 = 0;
#if 0
	def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);
#endif
	def1 = data1 = RREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2);
	def2 = data2 = RREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2);

	if (enable) {
#if 0
		data |= MM_ATC_L2_MISC_CG__ENABLE_MASK;
#endif
		data1 &= ~(DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 &= ~(DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			   DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	} else {
#if 0
		data &= ~MM_ATC_L2_MISC_CG__ENABLE_MASK;
#endif
		data1 |= (DAGB0_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB0_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);

		data2 |= (DAGB1_CNTL_MISC2__DISABLE_RDRET_TAP_CHAIN_FGCG_MASK |
			  DAGB1_CNTL_MISC2__DISABLE_WRRET_TAP_CHAIN_FGCG_MASK);
	}

#if 0
	if (def != data)
		WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
#endif
	if (def1 != data1)
		WREG32_SOC15(MMHUB, 0, regDAGB0_CNTL_MISC2, data1);

	if (def2 != data2)
		WREG32_SOC15(MMHUB, 0, regDAGB1_CNTL_MISC2, data2);
}

static void
mmhub_v4_1_0_update_medium_grain_light_sleep(struct amdgpu_device *adev,
					     bool enable)
{
#if 0
	uint32_t def, data;

	def = data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

	if (enable)
		data |= MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;
	else
		data &= ~MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK;

	if (def != data)
		WREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG, data);
#endif
}

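/*
 * Route clockgating requests to the MGCG and light sleep helpers based
 * on adev->cg_flags.  Not applicable on SRIOV VFs.
 */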
static int mmhub_v4_1_0_set_clockgating(struct amdgpu_device *adev,
					enum amd_clockgating_state state)
{
	if (amdgpu_sriov_vf(adev))
		return 0;

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG)
		mmhub_v4_1_0_update_medium_grain_clock_gating(adev,
				state == AMD_CG_STATE_GATE);

	if (adev->cg_flags & AMD_CG_SUPPORT_MC_LS)
		mmhub_v4_1_0_update_medium_grain_light_sleep(adev,
				state == AMD_CG_STATE_GATE);

	return 0;
}

static void mmhub_v4_1_0_get_clockgating(struct amdgpu_device *adev, u64 *flags)
{
#if 0
	int data;

	if (amdgpu_sriov_vf(adev))
		*flags = 0;

	data = RREG32_SOC15(MMHUB, 0, regMM_ATC_L2_MISC_CG);

	/* AMD_CG_SUPPORT_MC_MGCG */
	if (data & MM_ATC_L2_MISC_CG__ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_MGCG;

	/* AMD_CG_SUPPORT_MC_LS */
	if (data & MM_ATC_L2_MISC_CG__MEM_LS_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_MC_LS;
#endif
}

const struct amdgpu_mmhub_funcs mmhub_v4_1_0_funcs = {
	.init = mmhub_v4_1_0_init,
	.get_fb_location = mmhub_v4_1_0_get_fb_location,
	.get_mc_fb_offset = mmhub_v4_1_0_get_mc_fb_offset,
	.gart_enable = mmhub_v4_1_0_gart_enable,
	.set_fault_enable_default = mmhub_v4_1_0_set_fault_enable_default,
	.gart_disable = mmhub_v4_1_0_gart_disable,
	.set_clockgating = mmhub_v4_1_0_set_clockgating,
	.get_clockgating = mmhub_v4_1_0_get_clockgating,
	.setup_vm_pt_regs = mmhub_v4_1_0_setup_vm_pt_regs,
};