1 | /* |
2 | * Copyright 2021 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | #include "amdgpu.h" |
24 | #include "amdgpu_atombios.h" |
25 | #include "nbio_v4_3.h" |
26 | |
27 | #include "nbio/nbio_4_3_0_offset.h" |
28 | #include "nbio/nbio_4_3_0_sh_mask.h" |
29 | #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" |
30 | #include <uapi/linux/kfd_ioctl.h> |
31 | |
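/* Remap the HDP MEM/REG flush control registers into the reserved mmio
 * page at adev->rmmio_remap, so HDP flushes can be triggered through the
 * remapped offsets (e.g. by KFD) without exposing the full register BAR.
 */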
32 | static void nbio_v4_3_remap_hdp_registers(struct amdgpu_device *adev) |
33 | { |
34 | WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_MEM_FLUSH_CNTL, |
35 | adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL); |
36 | WREG32_SOC15(NBIO, 0, regBIF_BX0_REMAP_HDP_REG_FLUSH_CNTL, |
37 | adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL); |
38 | } |
39 | |
40 | static u32 nbio_v4_3_get_rev_id(struct amdgpu_device *adev) |
41 | { |
42 | u32 tmp = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_DEV0_EPF0_STRAP0); |
43 | |
44 | tmp &= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK; |
45 | tmp >>= RCC_STRAP0_RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT; |
46 | |
47 | return tmp; |
48 | } |
49 | |
50 | static void nbio_v4_3_mc_access_enable(struct amdgpu_device *adev, bool enable) |
51 | { |
52 | if (enable) |
53 | WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, |
54 | BIF_BX0_BIF_FB_EN__FB_READ_EN_MASK | |
55 | BIF_BX0_BIF_FB_EN__FB_WRITE_EN_MASK); |
56 | else |
57 | WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_FB_EN, 0); |
58 | } |
59 | |
60 | static u32 nbio_v4_3_get_memsize(struct amdgpu_device *adev) |
61 | { |
62 | return RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF0_RCC_CONFIG_MEMSIZE); |
63 | } |
64 | |
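/* Route the SDMA doorbell range to the engine through S2A doorbell
 * entry 2. Only instance 0 is programmed here; on this NBIO version a
 * single S2A entry appears to serve SDMA.
 */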
65 | static void nbio_v4_3_sdma_doorbell_range(struct amdgpu_device *adev, int instance, |
66 | bool use_doorbell, int doorbell_index, |
67 | int doorbell_size) |
68 | { |
69 | if (instance == 0) { |
70 | u32 doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL); |
71 | |
72 | if (use_doorbell) { |
73 | doorbell_range = REG_SET_FIELD(doorbell_range, |
74 | S2A_DOORBELL_ENTRY_2_CTRL, |
75 | S2A_DOORBELL_PORT2_ENABLE, |
76 | 0x1); |
77 | doorbell_range = REG_SET_FIELD(doorbell_range, |
78 | S2A_DOORBELL_ENTRY_2_CTRL, |
79 | S2A_DOORBELL_PORT2_AWID, |
80 | 0xe); |
81 | doorbell_range = REG_SET_FIELD(doorbell_range, |
82 | S2A_DOORBELL_ENTRY_2_CTRL, |
83 | S2A_DOORBELL_PORT2_RANGE_OFFSET, |
84 | doorbell_index); |
85 | doorbell_range = REG_SET_FIELD(doorbell_range, |
86 | S2A_DOORBELL_ENTRY_2_CTRL, |
87 | S2A_DOORBELL_PORT2_RANGE_SIZE, |
88 | doorbell_size); |
89 | doorbell_range = REG_SET_FIELD(doorbell_range, |
90 | S2A_DOORBELL_ENTRY_2_CTRL, |
91 | S2A_DOORBELL_PORT2_AWADDR_31_28_VALUE, |
92 | 0x3); |
		} else {
			doorbell_range = REG_SET_FIELD(doorbell_range,
						       S2A_DOORBELL_ENTRY_2_CTRL,
						       S2A_DOORBELL_PORT2_RANGE_SIZE,
						       0);
		}
98 | |
99 | WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_2_CTRL, doorbell_range); |
100 | } |
101 | } |
102 | |
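/* Route the VCN doorbell range through S2A doorbell entry 4 (instance 0)
 * or entry 5 (instance 1). Entry 5 uses the same bit layout as entry 4,
 * which is why the ENTRY_4 field masks are reused for both below.
 */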
103 | static void nbio_v4_3_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, |
104 | int doorbell_index, int instance) |
105 | { |
106 | u32 doorbell_range; |
107 | |
108 | if (instance) |
109 | doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL); |
110 | else |
111 | doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL); |
112 | |
113 | if (use_doorbell) { |
114 | doorbell_range = REG_SET_FIELD(doorbell_range, |
115 | S2A_DOORBELL_ENTRY_4_CTRL, |
116 | S2A_DOORBELL_PORT4_ENABLE, |
117 | 0x1); |
118 | doorbell_range = REG_SET_FIELD(doorbell_range, |
119 | S2A_DOORBELL_ENTRY_4_CTRL, |
120 | S2A_DOORBELL_PORT4_AWID, |
121 | instance ? 0x7 : 0x4); |
122 | doorbell_range = REG_SET_FIELD(doorbell_range, |
123 | S2A_DOORBELL_ENTRY_4_CTRL, |
124 | S2A_DOORBELL_PORT4_RANGE_OFFSET, |
125 | doorbell_index); |
126 | doorbell_range = REG_SET_FIELD(doorbell_range, |
127 | S2A_DOORBELL_ENTRY_4_CTRL, |
128 | S2A_DOORBELL_PORT4_RANGE_SIZE, |
129 | 8); |
130 | doorbell_range = REG_SET_FIELD(doorbell_range, |
131 | S2A_DOORBELL_ENTRY_4_CTRL, |
132 | S2A_DOORBELL_PORT4_AWADDR_31_28_VALUE, |
133 | instance ? 0x7 : 0x4); |
	} else {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       S2A_DOORBELL_ENTRY_4_CTRL,
					       S2A_DOORBELL_PORT4_RANGE_SIZE,
					       0);
	}
139 | |
140 | if (instance) |
141 | WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_5_CTRL, doorbell_range); |
142 | else |
143 | WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_4_CTRL, doorbell_range); |
144 | } |
145 | |
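/* Program the GC S2A doorbell entries 0 and 3 with fixed, pre-packed
 * control values rather than REG_SET_FIELD sequences; the constants
 * presumably encode the same enable/AWID/range fields used elsewhere in
 * this file.
 */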
146 | static void nbio_v4_3_gc_doorbell_init(struct amdgpu_device *adev) |
147 | { |
148 | WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_0_CTRL, 0x30000007); |
149 | WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_3_CTRL, 0x3000000d); |
150 | } |
151 | |
152 | static void nbio_v4_3_enable_doorbell_aperture(struct amdgpu_device *adev, |
153 | bool enable) |
154 | { |
155 | WREG32_FIELD15_PREREG(NBIO, 0, RCC_DEV0_EPF0_RCC_DOORBELL_APER_EN, |
156 | BIF_DOORBELL_APER_EN, enable ? 1 : 0); |
157 | } |
158 | |
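/* Enable or disable the doorbell self-ring aperture. When enabled, the
 * aperture base is pointed at the doorbell BAR so the chip can ring its
 * own doorbells through the GPA aperture.
 */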
159 | static void nbio_v4_3_enable_doorbell_selfring_aperture(struct amdgpu_device *adev, |
160 | bool enable) |
161 | { |
162 | u32 tmp = 0; |
163 | |
164 | if (enable) { |
165 | tmp = REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, |
166 | DOORBELL_SELFRING_GPA_APER_EN, 1) | |
167 | REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, |
168 | DOORBELL_SELFRING_GPA_APER_MODE, 1) | |
169 | REG_SET_FIELD(tmp, BIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, |
170 | DOORBELL_SELFRING_GPA_APER_SIZE, 0); |
171 | |
172 | WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_LOW, |
173 | lower_32_bits(adev->doorbell.base)); |
174 | WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_BASE_HIGH, |
175 | upper_32_bits(adev->doorbell.base)); |
176 | } |
177 | |
178 | WREG32_SOC15(NBIO, 0, regBIF_BX_PF0_DOORBELL_SELFRING_GPA_APER_CNTL, |
179 | tmp); |
180 | } |
181 | |
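/* Route the IH (interrupt handler) doorbell through S2A doorbell
 * entry 1, or shrink the range to zero when doorbells are unused.
 */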
182 | static void nbio_v4_3_ih_doorbell_range(struct amdgpu_device *adev, |
183 | bool use_doorbell, int doorbell_index) |
184 | { |
185 | u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL); |
186 | |
187 | if (use_doorbell) { |
188 | ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, |
189 | S2A_DOORBELL_ENTRY_1_CTRL, |
190 | S2A_DOORBELL_PORT1_ENABLE, |
191 | 0x1); |
192 | ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, |
193 | S2A_DOORBELL_ENTRY_1_CTRL, |
194 | S2A_DOORBELL_PORT1_AWID, |
195 | 0x0); |
196 | ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, |
197 | S2A_DOORBELL_ENTRY_1_CTRL, |
198 | S2A_DOORBELL_PORT1_RANGE_OFFSET, |
199 | doorbell_index); |
200 | ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, |
201 | S2A_DOORBELL_ENTRY_1_CTRL, |
202 | S2A_DOORBELL_PORT1_RANGE_SIZE, |
203 | 2); |
204 | ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, |
205 | S2A_DOORBELL_ENTRY_1_CTRL, |
206 | S2A_DOORBELL_PORT1_AWADDR_31_28_VALUE, |
207 | 0x0); |
	} else {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range,
						  S2A_DOORBELL_ENTRY_1_CTRL,
						  S2A_DOORBELL_PORT1_RANGE_SIZE,
						  0);
	}
213 | |
214 | WREG32_SOC15(NBIO, 0, regS2A_DOORBELL_ENTRY_1_CTRL, ih_doorbell_range); |
215 | } |
216 | |
217 | static void nbio_v4_3_ih_control(struct amdgpu_device *adev) |
218 | { |
219 | u32 interrupt_cntl; |
220 | |
221 | /* setup interrupt control */ |
222 | WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL2, adev->dummy_page_addr >> 8); |
223 | |
224 | interrupt_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL); |
225 | /* |
226 | * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi |
227 | * BIF_BX0_INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN |
228 | */ |
229 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, |
230 | IH_DUMMY_RD_OVERRIDE, 0); |
231 | |
232 | /* BIF_BX0_INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */ |
233 | interrupt_cntl = REG_SET_FIELD(interrupt_cntl, BIF_BX0_INTERRUPT_CNTL, |
234 | IH_REQ_NONSNOOP_EN, 0); |
235 | |
236 | WREG32_SOC15(NBIO, 0, regBIF_BX0_INTERRUPT_CNTL, interrupt_cntl); |
237 | } |
238 | |
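/* Toggle BIF medium-grain clock gating via the CPM_CONTROL gate enable
 * bits. Enabling is skipped when MGCG is not advertised in
 * adev->cg_flags; disabling always clears the bits.
 */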
239 | static void nbio_v4_3_update_medium_grain_clock_gating(struct amdgpu_device *adev, |
240 | bool enable) |
241 | { |
242 | uint32_t def, data; |
243 | |
244 | if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_MGCG)) |
245 | return; |
246 | |
247 | def = data = RREG32_SOC15(NBIO, 0, regCPM_CONTROL); |
248 | if (enable) { |
249 | data |= (CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | |
250 | CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | |
251 | CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | |
252 | CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | |
253 | CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | |
254 | CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); |
255 | } else { |
256 | data &= ~(CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK | |
257 | CPM_CONTROL__TXCLK_DYN_GATE_ENABLE_MASK | |
258 | CPM_CONTROL__TXCLK_LCNT_GATE_ENABLE_MASK | |
259 | CPM_CONTROL__TXCLK_REGS_GATE_ENABLE_MASK | |
260 | CPM_CONTROL__TXCLK_PRBS_GATE_ENABLE_MASK | |
261 | CPM_CONTROL__REFCLK_REGS_GATE_ENABLE_MASK); |
262 | } |
263 | |
264 | if (def != data) |
265 | WREG32_SOC15(NBIO, 0, regCPM_CONTROL, data); |
266 | } |
267 | |
268 | static void nbio_v4_3_update_medium_grain_light_sleep(struct amdgpu_device *adev, |
269 | bool enable) |
270 | { |
271 | uint32_t def, data; |
272 | |
273 | if (enable && !(adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) |
274 | return; |
275 | |
	/* TODO: needs an update in the future */
277 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_CNTL2); |
	if (enable)
		data |= PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
	else
		data &= ~PCIE_CNTL2__SLV_MEM_LS_EN_MASK;
283 | |
284 | if (def != data) |
285 | WREG32_SOC15(NBIO, 0, regPCIE_CNTL2, data); |
286 | } |
287 | |
288 | static void nbio_v4_3_get_clockgating_state(struct amdgpu_device *adev, |
289 | u64 *flags) |
290 | { |
291 | int data; |
292 | |
293 | /* AMD_CG_SUPPORT_BIF_MGCG */ |
294 | data = RREG32_SOC15(NBIO, 0, regCPM_CONTROL); |
295 | if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK) |
296 | *flags |= AMD_CG_SUPPORT_BIF_MGCG; |
297 | |
298 | /* AMD_CG_SUPPORT_BIF_LS */ |
299 | data = RREG32_SOC15(NBIO, 0, regPCIE_CNTL2); |
300 | if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK) |
301 | *flags |= AMD_CG_SUPPORT_BIF_LS; |
302 | } |
303 | |
304 | static u32 nbio_v4_3_get_hdp_flush_req_offset(struct amdgpu_device *adev) |
305 | { |
306 | return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_REQ); |
307 | } |
308 | |
309 | static u32 nbio_v4_3_get_hdp_flush_done_offset(struct amdgpu_device *adev) |
310 | { |
311 | return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_GPU_HDP_FLUSH_DONE); |
312 | } |
313 | |
314 | static u32 nbio_v4_3_get_pcie_index_offset(struct amdgpu_device *adev) |
315 | { |
316 | return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_INDEX); |
317 | } |
318 | |
319 | static u32 nbio_v4_3_get_pcie_data_offset(struct amdgpu_device *adev) |
320 | { |
321 | return SOC15_REG_OFFSET(NBIO, 0, regBIF_BX_PF0_RSMU_DATA); |
322 | } |
323 | |
324 | const struct nbio_hdp_flush_reg nbio_v4_3_hdp_flush_reg = { |
325 | .ref_and_mask_cp0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP0_MASK, |
326 | .ref_and_mask_cp1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP1_MASK, |
327 | .ref_and_mask_cp2 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP2_MASK, |
328 | .ref_and_mask_cp3 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP3_MASK, |
329 | .ref_and_mask_cp4 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP4_MASK, |
330 | .ref_and_mask_cp5 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP5_MASK, |
331 | .ref_and_mask_cp6 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP6_MASK, |
332 | .ref_and_mask_cp7 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP7_MASK, |
333 | .ref_and_mask_cp8 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP8_MASK, |
334 | .ref_and_mask_cp9 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__CP9_MASK, |
335 | .ref_and_mask_sdma0 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA0_MASK, |
336 | .ref_and_mask_sdma1 = BIF_BX_PF_GPU_HDP_FLUSH_DONE__SDMA1_MASK, |
337 | }; |
338 | |
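/* One-time register init. On NBIO 4.3.0, clear the no-soft-reset strap
 * for dev0 function 2 so that function can be soft-reset. Under SR-IOV,
 * rmmio_remap is pointed directly at the VF copy of the HDP flush
 * register instead (the remap registers are presumably programmed by
 * the host).
 */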
339 | static void nbio_v4_3_init_registers(struct amdgpu_device *adev) |
340 | { |
	if (amdgpu_ip_version(adev, NBIO_HWIP, 0) == IP_VERSION(4, 3, 0)) {
342 | uint32_t data; |
343 | |
344 | data = RREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2); |
345 | data &= ~RCC_DEV0_EPF2_STRAP2__STRAP_NO_SOFT_RESET_DEV0_F2_MASK; |
346 | WREG32_SOC15(NBIO, 0, regRCC_DEV0_EPF2_STRAP2, data); |
347 | } |
348 | if (amdgpu_sriov_vf(adev)) |
349 | adev->rmmio_remap.reg_offset = SOC15_REG_OFFSET(NBIO, 0, |
350 | regBIF_BX_DEV0_EPF0_VF0_HDP_MEM_COHERENCY_FLUSH_CNTL) << 2; |
351 | } |
352 | |
353 | static u32 nbio_v4_3_get_rom_offset(struct amdgpu_device *adev) |
354 | { |
355 | u32 data, rom_offset; |
356 | |
357 | data = RREG32_SOC15(NBIO, 0, regREGS_ROM_OFFSET_CTRL); |
358 | rom_offset = REG_GET_FIELD(data, REGS_ROM_OFFSET_CTRL, ROM_OFFSET); |
359 | |
360 | return rom_offset; |
361 | } |
362 | |
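/* Program LTR (Latency Tolerance Reporting). Judging by the field names,
 * this keeps LTR messaging alive in non-D0 PM states and across DL_Down,
 * allows LTR while in ASPM L1, and enables LTR in the endpoint's config
 * space only when the whole upstream path supports it (pdev->ltr_path).
 * Only built when CONFIG_PCIEASPM is set.
 */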
363 | #ifdef CONFIG_PCIEASPM |
364 | static void nbio_v4_3_program_ltr(struct amdgpu_device *adev) |
365 | { |
366 | uint32_t def, data; |
367 | |
368 | def = RREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL); |
369 | data = 0x35EB; |
370 | data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_MSG_DIS_IN_PM_NON_D0_MASK; |
371 | data &= ~EP_PCIE_TX_LTR_CNTL__LTR_PRIV_RST_LTR_IN_DL_DOWN_MASK; |
372 | if (def != data) |
373 | WREG32_SOC15(NBIO, 0, regRCC_EP_DEV0_0_EP_PCIE_TX_LTR_CNTL, data); |
374 | |
375 | def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2); |
376 | data &= ~RCC_BIF_STRAP2__STRAP_LTR_IN_ASPML1_DIS_MASK; |
377 | if (def != data) |
378 | WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP2, data); |
379 | |
380 | def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); |
381 | if (adev->pdev->ltr_path) |
382 | data |= BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; |
383 | else |
384 | data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; |
385 | if (def != data) |
386 | WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); |
387 | } |
388 | #endif |
389 | |
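/* Program ASPM for PCIE IP v7.4.0/7.6.0 only. The sequence follows a
 * disable/configure/re-enable pattern: the L0s/L1 inactivity timers and
 * LTR are first masked off, the link and strap timers are programmed,
 * and the L1 inactivity timer is re-enabled at the end.
 */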
390 | static void nbio_v4_3_program_aspm(struct amdgpu_device *adev) |
391 | { |
392 | #ifdef CONFIG_PCIEASPM |
393 | uint32_t def, data; |
394 | |
	if (!(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 4, 0)) &&
	    !(amdgpu_ip_version(adev, PCIE_HWIP, 0) == IP_VERSION(7, 6, 0)))
397 | return; |
398 | |
399 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL); |
400 | data &= ~PCIE_LC_CNTL__LC_L1_INACTIVITY_MASK; |
401 | data &= ~PCIE_LC_CNTL__LC_L0S_INACTIVITY_MASK; |
402 | data |= PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; |
403 | if (def != data) |
404 | WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL, data); |
405 | |
406 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL7); |
407 | data |= PCIE_LC_CNTL7__LC_NBIF_ASPM_INPUT_EN_MASK; |
408 | if (def != data) |
409 | WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL7, data); |
410 | |
411 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3); |
412 | data |= PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; |
413 | if (def != data) |
414 | WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3, data); |
415 | |
416 | def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3); |
417 | data &= ~RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER_MASK; |
418 | data &= ~RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER_MASK; |
419 | if (def != data) |
420 | WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data); |
421 | |
422 | def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5); |
423 | data &= ~RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER_MASK; |
424 | if (def != data) |
425 | WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data); |
426 | |
427 | def = data = RREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2); |
428 | data &= ~BIF_CFG_DEV0_EPF0_DEVICE_CNTL2__LTR_EN_MASK; |
429 | if (def != data) |
430 | WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_DEVICE_CNTL2, data); |
431 | |
432 | WREG32_SOC15(NBIO, 0, regBIF_CFG_DEV0_EPF0_PCIE_LTR_CAP, 0x10011001); |
433 | |
434 | def = data = RREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2); |
435 | data |= PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L1_MASK | |
436 | PSWUSP0_PCIE_LC_CNTL2__LC_ALLOW_PDWN_IN_L23_MASK; |
437 | data &= ~PSWUSP0_PCIE_LC_CNTL2__LC_RCV_L0_TO_RCV_L0S_DIS_MASK; |
438 | if (def != data) |
439 | WREG32_SOC15(NBIO, 0, regPSWUSP0_PCIE_LC_CNTL2, data); |
440 | |
441 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL4); |
442 | data |= PCIE_LC_CNTL4__LC_L1_POWERDOWN_MASK; |
443 | if (def != data) |
444 | WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL4, data); |
445 | |
446 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL); |
447 | data |= PCIE_LC_RXRECOVER_RXSTANDBY_CNTL__LC_RX_L0S_STANDBY_EN_MASK; |
448 | if (def != data) |
449 | WREG32_SOC15(NBIO, 0, regPCIE_LC_RXRECOVER_RXSTANDBY_CNTL, data); |
450 | |
451 | nbio_v4_3_program_ltr(adev); |
452 | |
453 | def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3); |
454 | data |= 0x5DE0 << RCC_BIF_STRAP3__STRAP_VLINK_ASPM_IDLE_TIMER__SHIFT; |
455 | data |= 0x0010 << RCC_BIF_STRAP3__STRAP_VLINK_PM_L1_ENTRY_TIMER__SHIFT; |
456 | if (def != data) |
457 | WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP3, data); |
458 | |
459 | def = data = RREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5); |
460 | data |= 0x0010 << RCC_BIF_STRAP5__STRAP_VLINK_LDN_ENTRY_TIMER__SHIFT; |
461 | if (def != data) |
462 | WREG32_SOC15(NBIO, 0, regRCC_STRAP0_RCC_BIF_STRAP5, data); |
463 | |
464 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL); |
465 | data |= 0x0 << PCIE_LC_CNTL__LC_L0S_INACTIVITY__SHIFT; |
466 | data |= 0x9 << PCIE_LC_CNTL__LC_L1_INACTIVITY__SHIFT; |
467 | data &= ~PCIE_LC_CNTL__LC_PMI_TO_L1_DIS_MASK; |
468 | if (def != data) |
469 | WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL, data); |
470 | |
471 | def = data = RREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3); |
472 | data &= ~PCIE_LC_CNTL3__LC_DSC_DONT_ENTER_L23_AFTER_PME_ACK_MASK; |
473 | if (def != data) |
474 | WREG32_SOC15(NBIO, 0, regPCIE_LC_CNTL3, data); |
475 | #endif |
476 | } |
477 | |
478 | const struct amdgpu_nbio_funcs nbio_v4_3_funcs = { |
479 | .get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset, |
480 | .get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset, |
481 | .get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset, |
482 | .get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset, |
483 | .get_rev_id = nbio_v4_3_get_rev_id, |
484 | .mc_access_enable = nbio_v4_3_mc_access_enable, |
485 | .get_memsize = nbio_v4_3_get_memsize, |
486 | .sdma_doorbell_range = nbio_v4_3_sdma_doorbell_range, |
487 | .vcn_doorbell_range = nbio_v4_3_vcn_doorbell_range, |
488 | .gc_doorbell_init = nbio_v4_3_gc_doorbell_init, |
489 | .enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture, |
490 | .enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture, |
491 | .ih_doorbell_range = nbio_v4_3_ih_doorbell_range, |
492 | .update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating, |
493 | .update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep, |
494 | .get_clockgating_state = nbio_v4_3_get_clockgating_state, |
495 | .ih_control = nbio_v4_3_ih_control, |
496 | .init_registers = nbio_v4_3_init_registers, |
497 | .remap_hdp_registers = nbio_v4_3_remap_hdp_registers, |
498 | .get_rom_offset = nbio_v4_3_get_rom_offset, |
499 | .program_aspm = nbio_v4_3_program_aspm, |
500 | }; |
501 | |
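/* SR-IOV variants: the S2A doorbell entries are expected to be
 * programmed by the host driver for virtual functions, so the guest
 * uses empty stubs.
 */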
503 | static void nbio_v4_3_sriov_ih_doorbell_range(struct amdgpu_device *adev, |
504 | bool use_doorbell, int doorbell_index) |
505 | { |
506 | } |
507 | |
508 | static void nbio_v4_3_sriov_sdma_doorbell_range(struct amdgpu_device *adev, int instance, |
509 | bool use_doorbell, int doorbell_index, |
510 | int doorbell_size) |
511 | { |
512 | } |
513 | |
514 | static void nbio_v4_3_sriov_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell, |
515 | int doorbell_index, int instance) |
516 | { |
517 | } |
518 | |
519 | static void nbio_v4_3_sriov_gc_doorbell_init(struct amdgpu_device *adev) |
520 | { |
521 | } |
522 | |
523 | const struct amdgpu_nbio_funcs nbio_v4_3_sriov_funcs = { |
524 | .get_hdp_flush_req_offset = nbio_v4_3_get_hdp_flush_req_offset, |
525 | .get_hdp_flush_done_offset = nbio_v4_3_get_hdp_flush_done_offset, |
526 | .get_pcie_index_offset = nbio_v4_3_get_pcie_index_offset, |
527 | .get_pcie_data_offset = nbio_v4_3_get_pcie_data_offset, |
528 | .get_rev_id = nbio_v4_3_get_rev_id, |
529 | .mc_access_enable = nbio_v4_3_mc_access_enable, |
530 | .get_memsize = nbio_v4_3_get_memsize, |
531 | .sdma_doorbell_range = nbio_v4_3_sriov_sdma_doorbell_range, |
532 | .vcn_doorbell_range = nbio_v4_3_sriov_vcn_doorbell_range, |
533 | .gc_doorbell_init = nbio_v4_3_sriov_gc_doorbell_init, |
534 | .enable_doorbell_aperture = nbio_v4_3_enable_doorbell_aperture, |
535 | .enable_doorbell_selfring_aperture = nbio_v4_3_enable_doorbell_selfring_aperture, |
536 | .ih_doorbell_range = nbio_v4_3_sriov_ih_doorbell_range, |
537 | .update_medium_grain_clock_gating = nbio_v4_3_update_medium_grain_clock_gating, |
538 | .update_medium_grain_light_sleep = nbio_v4_3_update_medium_grain_light_sleep, |
539 | .get_clockgating_state = nbio_v4_3_get_clockgating_state, |
540 | .ih_control = nbio_v4_3_ih_control, |
541 | .init_registers = nbio_v4_3_init_registers, |
542 | .remap_hdp_registers = nbio_v4_3_remap_hdp_registers, |
543 | .get_rom_offset = nbio_v4_3_get_rom_offset, |
544 | }; |
545 | |
546 | static int nbio_v4_3_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev, |
547 | struct amdgpu_irq_src *src, |
						       unsigned int type,
549 | enum amdgpu_interrupt_state state) |
550 | { |
	/* The ras_controller_irq enablement should be done in psp bl when it
	 * tries to enable ras feature. Driver only needs to set the correct
	 * interrupt vector for bare-metal and sriov use cases respectively.
	 */
555 | uint32_t bif_doorbell_int_cntl; |
556 | |
557 | bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); |
558 | bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl, |
559 | BIF_BX0_BIF_DOORBELL_INT_CNTL, |
560 | RAS_ATHUB_ERR_EVENT_INTERRUPT_DISABLE, |
561 | (state == AMDGPU_IRQ_STATE_ENABLE) ? 0 : 1); |
562 | WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl); |
563 | |
564 | return 0; |
565 | } |
566 | |
567 | static int nbio_v4_3_process_err_event_athub_irq(struct amdgpu_device *adev, |
568 | struct amdgpu_irq_src *source, |
569 | struct amdgpu_iv_entry *entry) |
570 | { |
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the bif ring. Since the bif ring is not enabled, just leave the
	 * process callback as a dummy one.
	 */
575 | return 0; |
576 | } |
577 | |
578 | static const struct amdgpu_irq_src_funcs nbio_v4_3_ras_err_event_athub_irq_funcs = { |
579 | .set = nbio_v4_3_set_ras_err_event_athub_irq_state, |
580 | .process = nbio_v4_3_process_err_event_athub_irq, |
581 | }; |
582 | |
583 | static void nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev) |
584 | { |
585 | uint32_t bif_doorbell_int_cntl; |
586 | |
587 | bif_doorbell_int_cntl = RREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL); |
	if (REG_GET_FIELD(bif_doorbell_int_cntl,
			  BIF_BX0_BIF_DOORBELL_INT_CNTL,
			  RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_int_cntl = REG_SET_FIELD(bif_doorbell_int_cntl,
						      BIF_BX0_BIF_DOORBELL_INT_CNTL,
						      RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
595 | WREG32_SOC15(NBIO, 0, regBIF_BX0_BIF_DOORBELL_INT_CNTL, bif_doorbell_int_cntl); |
596 | amdgpu_ras_global_ras_isr(adev); |
597 | } |
598 | } |
599 | |
600 | static int nbio_v4_3_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev) |
601 | { |
602 | |
603 | int r; |
604 | |
605 | /* init the irq funcs */ |
606 | adev->nbio.ras_err_event_athub_irq.funcs = |
607 | &nbio_v4_3_ras_err_event_athub_irq_funcs; |
608 | adev->nbio.ras_err_event_athub_irq.num_types = 1; |
609 | |
	/* register ras err event athub interrupt
	 * nbio v4_3 uses the same irq source as nbio v7_4
	 */
	r = amdgpu_irq_add_id(adev, SOC21_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);
615 | |
616 | return r; |
617 | } |
618 | |
619 | struct amdgpu_nbio_ras nbio_v4_3_ras = { |
620 | .handle_ras_err_event_athub_intr_no_bifring = nbio_v4_3_handle_ras_err_event_athub_intr_no_bifring, |
621 | .init_ras_err_event_athub_interrupt = nbio_v4_3_init_ras_err_event_athub_interrupt, |
622 | }; |
623 | |