1 | /* |
2 | * Copyright 2016 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | */ |
23 | #include <linux/firmware.h> |
24 | #include <linux/slab.h> |
25 | #include <linux/module.h> |
26 | #include <linux/pci.h> |
27 | |
28 | #include <drm/amdgpu_drm.h> |
29 | |
30 | #include "amdgpu.h" |
31 | #include "amdgpu_atombios.h" |
32 | #include "amdgpu_ih.h" |
33 | #include "amdgpu_uvd.h" |
34 | #include "amdgpu_vce.h" |
35 | #include "amdgpu_ucode.h" |
36 | #include "amdgpu_psp.h" |
37 | #include "atom.h" |
38 | #include "amd_pcie.h" |
39 | |
40 | #include "uvd/uvd_7_0_offset.h" |
41 | #include "gc/gc_9_0_offset.h" |
42 | #include "gc/gc_9_0_sh_mask.h" |
43 | #include "sdma0/sdma0_4_0_offset.h" |
44 | #include "sdma1/sdma1_4_0_offset.h" |
45 | #include "nbio/nbio_7_0_default.h" |
46 | #include "nbio/nbio_7_0_offset.h" |
47 | #include "nbio/nbio_7_0_sh_mask.h" |
48 | #include "nbio/nbio_7_0_smn.h" |
49 | #include "mp/mp_9_0_offset.h" |
50 | |
51 | #include "soc15.h" |
52 | #include "soc15_common.h" |
53 | #include "gfx_v9_0.h" |
54 | #include "gmc_v9_0.h" |
55 | #include "gfxhub_v1_0.h" |
56 | #include "mmhub_v1_0.h" |
57 | #include "df_v1_7.h" |
58 | #include "df_v3_6.h" |
59 | #include "nbio_v6_1.h" |
60 | #include "nbio_v7_0.h" |
61 | #include "nbio_v7_4.h" |
62 | #include "hdp_v4_0.h" |
63 | #include "vega10_ih.h" |
64 | #include "vega20_ih.h" |
65 | #include "navi10_ih.h" |
66 | #include "sdma_v4_0.h" |
67 | #include "uvd_v7_0.h" |
68 | #include "vce_v4_0.h" |
69 | #include "vcn_v1_0.h" |
70 | #include "vcn_v2_0.h" |
71 | #include "jpeg_v2_0.h" |
72 | #include "vcn_v2_5.h" |
73 | #include "jpeg_v2_5.h" |
74 | #include "smuio_v9_0.h" |
75 | #include "smuio_v11_0.h" |
76 | #include "smuio_v13_0.h" |
77 | #include "amdgpu_vkms.h" |
78 | #include "mxgpu_ai.h" |
79 | #include "amdgpu_ras.h" |
80 | #include "amdgpu_xgmi.h" |
81 | #include <uapi/linux/kfd_ioctl.h> |
82 | |
83 | #define mmMP0_MISC_CGTT_CTRL0 0x01b9 |
84 | #define mmMP0_MISC_CGTT_CTRL0_BASE_IDX 0 |
85 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL 0x01ba |
86 | #define mmMP0_MISC_LIGHT_SLEEP_CTRL_BASE_IDX 0 |
87 | |
88 | static const struct amd_ip_funcs soc15_common_ip_funcs; |
89 | |
90 | /* Vega, Raven, Arcturus */ |
91 | static const struct amdgpu_video_codec_info vega_video_codecs_encode_array[] = |
92 | { |
93 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 2304, 0)}, |
94 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 2304, 0)}, |
95 | }; |
96 | |
97 | static const struct amdgpu_video_codecs vega_video_codecs_encode = |
98 | { |
99 | .codec_count = ARRAY_SIZE(vega_video_codecs_encode_array), |
100 | .codec_array = vega_video_codecs_encode_array, |
101 | }; |
102 | |
103 | /* Vega */ |
104 | static const struct amdgpu_video_codec_info vega_video_codecs_decode_array[] = |
105 | { |
106 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, |
107 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, |
108 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, |
109 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, |
110 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, |
111 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, |
112 | }; |
113 | |
114 | static const struct amdgpu_video_codecs vega_video_codecs_decode = |
115 | { |
116 | .codec_count = ARRAY_SIZE(vega_video_codecs_decode_array), |
117 | .codec_array = vega_video_codecs_decode_array, |
118 | }; |
119 | |
120 | /* Raven */ |
121 | static const struct amdgpu_video_codec_info rv_video_codecs_decode_array[] = |
122 | { |
123 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, |
124 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, |
125 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, |
126 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, |
127 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 4096, 4096, 186)}, |
128 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, |
129 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 4096, 4096, 0)}, |
130 | }; |
131 | |
132 | static const struct amdgpu_video_codecs rv_video_codecs_decode = |
133 | { |
134 | .codec_count = ARRAY_SIZE(rv_video_codecs_decode_array), |
135 | .codec_array = rv_video_codecs_decode_array, |
136 | }; |
137 | |
138 | /* Renoir, Arcturus */ |
139 | static const struct amdgpu_video_codec_info rn_video_codecs_decode_array[] = |
140 | { |
141 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG2, 4096, 4096, 3)}, |
142 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4, 4096, 4096, 5)}, |
143 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, |
144 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VC1, 4096, 4096, 4)}, |
145 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, |
146 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, |
147 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, |
148 | }; |
149 | |
150 | static const struct amdgpu_video_codecs rn_video_codecs_decode = |
151 | { |
152 | .codec_count = ARRAY_SIZE(rn_video_codecs_decode_array), |
153 | .codec_array = rn_video_codecs_decode_array, |
154 | }; |
155 | |
156 | static const struct amdgpu_video_codec_info vcn_4_0_3_video_codecs_decode_array[] = { |
157 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_MPEG4_AVC, 4096, 4096, 52)}, |
158 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_HEVC, 8192, 4352, 186)}, |
159 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_JPEG, 4096, 4096, 0)}, |
160 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_VP9, 8192, 4352, 0)}, |
161 | {codec_info_build(AMDGPU_INFO_VIDEO_CAPS_CODEC_IDX_AV1, 8192, 4352, 0)}, |
162 | }; |
163 | |
164 | static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_decode = { |
165 | .codec_count = ARRAY_SIZE(vcn_4_0_3_video_codecs_decode_array), |
166 | .codec_array = vcn_4_0_3_video_codecs_decode_array, |
167 | }; |
168 | |
169 | static const struct amdgpu_video_codecs vcn_4_0_3_video_codecs_encode = { |
170 | .codec_count = 0, |
171 | .codec_array = NULL, |
172 | }; |
173 | |
174 | static int soc15_query_video_codecs(struct amdgpu_device *adev, bool encode, |
175 | const struct amdgpu_video_codecs **codecs) |
176 | { |
177 | 	if (amdgpu_ip_version(adev, VCE_HWIP, 0)) { |
178 | 		switch (amdgpu_ip_version(adev, VCE_HWIP, 0)) { |
179 | case IP_VERSION(4, 0, 0): |
180 | case IP_VERSION(4, 1, 0): |
181 | if (encode) |
182 | *codecs = &vega_video_codecs_encode; |
183 | else |
184 | *codecs = &vega_video_codecs_decode; |
185 | return 0; |
186 | default: |
187 | return -EINVAL; |
188 | } |
189 | } else { |
190 | 		switch (amdgpu_ip_version(adev, UVD_HWIP, 0)) { |
191 | case IP_VERSION(1, 0, 0): |
192 | case IP_VERSION(1, 0, 1): |
193 | if (encode) |
194 | *codecs = &vega_video_codecs_encode; |
195 | else |
196 | *codecs = &rv_video_codecs_decode; |
197 | return 0; |
198 | case IP_VERSION(2, 5, 0): |
199 | case IP_VERSION(2, 6, 0): |
200 | case IP_VERSION(2, 2, 0): |
201 | if (encode) |
202 | *codecs = &vega_video_codecs_encode; |
203 | else |
204 | *codecs = &rn_video_codecs_decode; |
205 | return 0; |
206 | case IP_VERSION(4, 0, 3): |
207 | if (encode) |
208 | *codecs = &vcn_4_0_3_video_codecs_encode; |
209 | else |
210 | *codecs = &vcn_4_0_3_video_codecs_decode; |
211 | return 0; |
212 | default: |
213 | return -EINVAL; |
214 | } |
215 | } |
216 | } |
217 | |
218 | static u32 soc15_uvd_ctx_rreg(struct amdgpu_device *adev, u32 reg) |
219 | { |
220 | unsigned long flags, address, data; |
221 | u32 r; |
222 | |
223 | address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX); |
224 | data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA); |
225 | |
226 | spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); |
227 | WREG32(address, ((reg) & 0x1ff)); |
228 | r = RREG32(data); |
229 | 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); |
230 | return r; |
231 | } |
232 | |
233 | static void soc15_uvd_ctx_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
234 | { |
235 | unsigned long flags, address, data; |
236 | |
237 | address = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_INDEX); |
238 | data = SOC15_REG_OFFSET(UVD, 0, mmUVD_CTX_DATA); |
239 | |
240 | spin_lock_irqsave(&adev->uvd_ctx_idx_lock, flags); |
241 | WREG32(address, ((reg) & 0x1ff)); |
242 | WREG32(data, (v)); |
243 | 	spin_unlock_irqrestore(&adev->uvd_ctx_idx_lock, flags); |
244 | } |
245 | |
246 | static u32 soc15_didt_rreg(struct amdgpu_device *adev, u32 reg) |
247 | { |
248 | unsigned long flags, address, data; |
249 | u32 r; |
250 | |
251 | address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX); |
252 | data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA); |
253 | |
254 | spin_lock_irqsave(&adev->didt_idx_lock, flags); |
255 | WREG32(address, (reg)); |
256 | r = RREG32(data); |
257 | 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags); |
258 | return r; |
259 | } |
260 | |
261 | static void soc15_didt_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
262 | { |
263 | unsigned long flags, address, data; |
264 | |
265 | address = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_INDEX); |
266 | data = SOC15_REG_OFFSET(GC, 0, mmDIDT_IND_DATA); |
267 | |
268 | spin_lock_irqsave(&adev->didt_idx_lock, flags); |
269 | WREG32(address, (reg)); |
270 | WREG32(data, (v)); |
271 | 	spin_unlock_irqrestore(&adev->didt_idx_lock, flags); |
272 | } |
273 | |
274 | static u32 soc15_gc_cac_rreg(struct amdgpu_device *adev, u32 reg) |
275 | { |
276 | unsigned long flags; |
277 | u32 r; |
278 | |
279 | spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); |
280 | WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); |
281 | r = RREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA); |
282 | 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); |
283 | return r; |
284 | } |
285 | |
286 | static void soc15_gc_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
287 | { |
288 | unsigned long flags; |
289 | |
290 | spin_lock_irqsave(&adev->gc_cac_idx_lock, flags); |
291 | WREG32_SOC15(GC, 0, mmGC_CAC_IND_INDEX, (reg)); |
292 | WREG32_SOC15(GC, 0, mmGC_CAC_IND_DATA, (v)); |
293 | 	spin_unlock_irqrestore(&adev->gc_cac_idx_lock, flags); |
294 | } |
295 | |
296 | static u32 soc15_se_cac_rreg(struct amdgpu_device *adev, u32 reg) |
297 | { |
298 | unsigned long flags; |
299 | u32 r; |
300 | |
301 | spin_lock_irqsave(&adev->se_cac_idx_lock, flags); |
302 | WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); |
303 | r = RREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA); |
304 | 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); |
305 | return r; |
306 | } |
307 | |
308 | static void soc15_se_cac_wreg(struct amdgpu_device *adev, u32 reg, u32 v) |
309 | { |
310 | unsigned long flags; |
311 | |
312 | spin_lock_irqsave(&adev->se_cac_idx_lock, flags); |
313 | WREG32_SOC15(GC, 0, mmSE_CAC_IND_INDEX, (reg)); |
314 | WREG32_SOC15(GC, 0, mmSE_CAC_IND_DATA, (v)); |
315 | 	spin_unlock_irqrestore(&adev->se_cac_idx_lock, flags); |
316 | } |
317 | |
318 | static u32 soc15_get_config_memsize(struct amdgpu_device *adev) |
319 | { |
320 | return adev->nbio.funcs->get_memsize(adev); |
321 | } |
322 | |
323 | static u32 soc15_get_xclk(struct amdgpu_device *adev) |
324 | { |
325 | u32 reference_clock = adev->clock.spll.reference_freq; |
326 | |
327 | 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 0) || |
328 | 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(12, 0, 1) || |
329 | 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(13, 0, 6)) |
330 | 		return 10000; |
331 | 	if (amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 0) || |
332 | 	    amdgpu_ip_version(adev, MP1_HWIP, 0) == IP_VERSION(10, 0, 1)) |
333 | return reference_clock / 4; |
334 | |
335 | return reference_clock; |
336 | } |
337 | |
338 | |
339 | void soc15_grbm_select(struct amdgpu_device *adev, |
340 | u32 me, u32 pipe, u32 queue, u32 vmid, int xcc_id) |
341 | { |
342 | u32 grbm_gfx_cntl = 0; |
343 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, PIPEID, pipe); |
344 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, MEID, me); |
345 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, VMID, vmid); |
346 | grbm_gfx_cntl = REG_SET_FIELD(grbm_gfx_cntl, GRBM_GFX_CNTL, QUEUEID, queue); |
347 | |
348 | WREG32_SOC15_RLC_SHADOW(GC, xcc_id, mmGRBM_GFX_CNTL, grbm_gfx_cntl); |
349 | } |
350 | |
351 | static bool soc15_read_disabled_bios(struct amdgpu_device *adev) |
352 | { |
353 | /* todo */ |
354 | return false; |
355 | } |
356 | |
357 | static struct soc15_allowed_register_entry soc15_allowed_read_registers[] = { |
358 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS)}, |
359 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS2)}, |
360 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE0)}, |
361 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE1)}, |
362 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE2)}, |
363 | { SOC15_REG_ENTRY(GC, 0, mmGRBM_STATUS_SE3)}, |
364 | { SOC15_REG_ENTRY(SDMA0, 0, mmSDMA0_STATUS_REG)}, |
365 | { SOC15_REG_ENTRY(SDMA1, 0, mmSDMA1_STATUS_REG)}, |
366 | { SOC15_REG_ENTRY(GC, 0, mmCP_STAT)}, |
367 | { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT1)}, |
368 | { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT2)}, |
369 | { SOC15_REG_ENTRY(GC, 0, mmCP_STALLED_STAT3)}, |
370 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_BUSY_STAT)}, |
371 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STALLED_STAT1)}, |
372 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPF_STATUS)}, |
373 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_BUSY_STAT)}, |
374 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STALLED_STAT1)}, |
375 | { SOC15_REG_ENTRY(GC, 0, mmCP_CPC_STATUS)}, |
376 | { SOC15_REG_ENTRY(GC, 0, mmGB_ADDR_CONFIG)}, |
377 | { SOC15_REG_ENTRY(GC, 0, mmDB_DEBUG2)}, |
378 | }; |
379 | |
380 | static uint32_t soc15_read_indexed_register(struct amdgpu_device *adev, u32 se_num, |
381 | u32 sh_num, u32 reg_offset) |
382 | { |
383 | uint32_t val; |
384 | |
385 | mutex_lock(&adev->grbm_idx_mutex); |
386 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
387 | amdgpu_gfx_select_se_sh(adev, se_num, sh_num, 0xffffffff, 0); |
388 | |
389 | val = RREG32(reg_offset); |
390 | |
391 | if (se_num != 0xffffffff || sh_num != 0xffffffff) |
392 | amdgpu_gfx_select_se_sh(adev, 0xffffffff, 0xffffffff, 0xffffffff, 0); |
393 | 	mutex_unlock(&adev->grbm_idx_mutex); |
394 | return val; |
395 | } |
396 | |
397 | static uint32_t soc15_get_register_value(struct amdgpu_device *adev, |
398 | bool indexed, u32 se_num, |
399 | u32 sh_num, u32 reg_offset) |
400 | { |
401 | if (indexed) { |
402 | return soc15_read_indexed_register(adev, se_num, sh_num, reg_offset); |
403 | } else { |
404 | if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmGB_ADDR_CONFIG)) |
405 | return adev->gfx.config.gb_addr_config; |
406 | else if (reg_offset == SOC15_REG_OFFSET(GC, 0, mmDB_DEBUG2)) |
407 | return adev->gfx.config.db_debug2; |
408 | return RREG32(reg_offset); |
409 | } |
410 | } |
411 | |
412 | static int soc15_read_register(struct amdgpu_device *adev, u32 se_num, |
413 | u32 sh_num, u32 reg_offset, u32 *value) |
414 | { |
415 | uint32_t i; |
416 | struct soc15_allowed_register_entry *en; |
417 | |
418 | *value = 0; |
419 | for (i = 0; i < ARRAY_SIZE(soc15_allowed_read_registers); i++) { |
420 | en = &soc15_allowed_read_registers[i]; |
421 | if (!adev->reg_offset[en->hwip][en->inst]) |
422 | continue; |
423 | else if (reg_offset != (adev->reg_offset[en->hwip][en->inst][en->seg] |
424 | + en->reg_offset)) |
425 | continue; |
426 | |
427 | *value = soc15_get_register_value(adev, |
428 | 						soc15_allowed_read_registers[i].grbm_indexed, |
429 | se_num, sh_num, reg_offset); |
430 | return 0; |
431 | } |
432 | return -EINVAL; |
433 | } |
434 | |
435 | |
436 | /** |
437 | * soc15_program_register_sequence - program an array of registers. |
438 | * |
439 | * @adev: amdgpu_device pointer |
440 | * @regs: pointer to the register array |
441 | * @array_size: size of the register array |
442 | * |
443 |  * Programs an array of registers with AND and OR masks. |
444 | * This is a helper for setting golden registers. |
445 | */ |
446 | |
447 | void soc15_program_register_sequence(struct amdgpu_device *adev, |
448 | const struct soc15_reg_golden *regs, |
449 | const u32 array_size) |
450 | { |
451 | const struct soc15_reg_golden *entry; |
452 | u32 tmp, reg; |
453 | int i; |
454 | |
455 | for (i = 0; i < array_size; ++i) { |
456 | 		entry = &regs[i]; |
457 | reg = adev->reg_offset[entry->hwip][entry->instance][entry->segment] + entry->reg; |
458 | |
459 | if (entry->and_mask == 0xffffffff) { |
460 | tmp = entry->or_mask; |
461 | } else { |
462 | tmp = (entry->hwip == GC_HWIP) ? |
463 | RREG32_SOC15_IP(GC, reg) : RREG32(reg); |
464 | |
465 | tmp &= ~(entry->and_mask); |
466 | tmp |= (entry->or_mask & entry->and_mask); |
467 | } |
468 | |
469 | if (reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_BINNER_EVENT_CNTL_3) || |
470 | reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE) || |
471 | reg == SOC15_REG_OFFSET(GC, 0, mmPA_SC_ENHANCE_1) || |
472 | reg == SOC15_REG_OFFSET(GC, 0, mmSH_MEM_CONFIG)) |
473 | WREG32_RLC(reg, tmp); |
474 | else |
475 | (entry->hwip == GC_HWIP) ? |
476 | WREG32_SOC15_IP(GC, reg, tmp) : WREG32(reg, tmp); |
477 | |
478 | } |
479 | |
480 | } |
481 | |
482 | static int soc15_asic_baco_reset(struct amdgpu_device *adev) |
483 | { |
484 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
485 | int ret = 0; |
486 | |
487 | 	/* avoid NBIF getting stuck when doing RAS recovery in BACO reset */ |
488 | if (ras && adev->ras_enabled) |
489 | adev->nbio.funcs->enable_doorbell_interrupt(adev, false); |
490 | |
491 | ret = amdgpu_dpm_baco_reset(adev); |
492 | if (ret) |
493 | return ret; |
494 | |
495 | /* re-enable doorbell interrupt after BACO exit */ |
496 | if (ras && adev->ras_enabled) |
497 | adev->nbio.funcs->enable_doorbell_interrupt(adev, true); |
498 | |
499 | return 0; |
500 | } |
501 | |
502 | static enum amd_reset_method |
503 | soc15_asic_reset_method(struct amdgpu_device *adev) |
504 | { |
505 | bool baco_reset = false; |
506 | bool connected_to_cpu = false; |
507 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
508 | |
509 | if (adev->gmc.xgmi.supported && adev->gmc.xgmi.connected_to_cpu) |
510 | connected_to_cpu = true; |
511 | |
512 | if (amdgpu_reset_method == AMD_RESET_METHOD_MODE1 || |
513 | amdgpu_reset_method == AMD_RESET_METHOD_MODE2 || |
514 | amdgpu_reset_method == AMD_RESET_METHOD_BACO || |
515 | amdgpu_reset_method == AMD_RESET_METHOD_PCI) { |
516 | 		/* If connected to cpu, driver only supports mode2 */ |
517 | if (connected_to_cpu) |
518 | return AMD_RESET_METHOD_MODE2; |
519 | return amdgpu_reset_method; |
520 | } |
521 | |
522 | if (amdgpu_reset_method != -1) |
523 | 		dev_warn(adev->dev, "Specified reset method:%d isn't supported, using AUTO instead.\n", |
524 | amdgpu_reset_method); |
525 | |
526 | 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { |
527 | case IP_VERSION(10, 0, 0): |
528 | case IP_VERSION(10, 0, 1): |
529 | case IP_VERSION(12, 0, 0): |
530 | case IP_VERSION(12, 0, 1): |
531 | return AMD_RESET_METHOD_MODE2; |
532 | case IP_VERSION(9, 0, 0): |
533 | case IP_VERSION(11, 0, 2): |
534 | if (adev->asic_type == CHIP_VEGA20) { |
535 | if (adev->psp.sos.fw_version >= 0x80067) |
536 | baco_reset = amdgpu_dpm_is_baco_supported(adev); |
537 | /* |
538 | * 1. PMFW version > 0x284300: all cases use baco |
539 | * 2. PMFW version <= 0x284300: only sGPU w/o RAS use baco |
540 | */ |
541 | if (ras && adev->ras_enabled && |
542 | adev->pm.fw_version <= 0x283400) |
543 | baco_reset = false; |
544 | } else { |
545 | baco_reset = amdgpu_dpm_is_baco_supported(adev); |
546 | } |
547 | break; |
548 | case IP_VERSION(13, 0, 2): |
549 | /* |
550 | 		 * 1. connected to cpu: driver issues mode2 reset |
551 | 		 * 2. discrete gpu: driver issues mode1 reset |
552 | */ |
553 | if (connected_to_cpu) |
554 | return AMD_RESET_METHOD_MODE2; |
555 | break; |
556 | case IP_VERSION(13, 0, 6): |
557 | /* Use gpu_recovery param to target a reset method. |
558 | * Enable triggering of GPU reset only if specified |
559 | * by module parameter. |
560 | */ |
561 | if (amdgpu_gpu_recovery == 4 || amdgpu_gpu_recovery == 5) |
562 | return AMD_RESET_METHOD_MODE2; |
563 | else if (!(adev->flags & AMD_IS_APU)) |
564 | return AMD_RESET_METHOD_MODE1; |
565 | else |
566 | return AMD_RESET_METHOD_MODE2; |
567 | default: |
568 | break; |
569 | } |
570 | |
571 | if (baco_reset) |
572 | return AMD_RESET_METHOD_BACO; |
573 | else |
574 | return AMD_RESET_METHOD_MODE1; |
575 | } |
576 | |
577 | static bool soc15_need_reset_on_resume(struct amdgpu_device *adev) |
578 | { |
579 | u32 sol_reg; |
580 | |
581 | sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); |
582 | |
583 | /* Will reset for the following suspend abort cases. |
584 | * 1) Only reset limit on APU side, dGPU hasn't checked yet. |
585 | * 2) S3 suspend abort and TOS already launched. |
586 | */ |
587 | if (adev->flags & AMD_IS_APU && adev->in_s3 && |
588 | !adev->suspend_complete && |
589 | sol_reg) |
590 | return true; |
591 | |
592 | return false; |
593 | } |
594 | |
595 | static int soc15_asic_reset(struct amdgpu_device *adev) |
596 | { |
597 | /* original raven doesn't have full asic reset */ |
598 | /* On the latest Raven, the GPU reset can be performed |
599 | * successfully. So now, temporarily enable it for the |
600 | * S3 suspend abort case. |
601 | */ |
602 | if (((adev->apu_flags & AMD_APU_IS_RAVEN) || |
603 | (adev->apu_flags & AMD_APU_IS_RAVEN2)) && |
604 | !soc15_need_reset_on_resume(adev)) |
605 | return 0; |
606 | |
607 | switch (soc15_asic_reset_method(adev)) { |
608 | case AMD_RESET_METHOD_PCI: |
609 | 		dev_info(adev->dev, "PCI reset\n"); |
610 | return amdgpu_device_pci_reset(adev); |
611 | case AMD_RESET_METHOD_BACO: |
612 | 		dev_info(adev->dev, "BACO reset\n"); |
613 | return soc15_asic_baco_reset(adev); |
614 | case AMD_RESET_METHOD_MODE2: |
615 | 		dev_info(adev->dev, "MODE2 reset\n"); |
616 | return amdgpu_dpm_mode2_reset(adev); |
617 | default: |
618 | 		dev_info(adev->dev, "MODE1 reset\n"); |
619 | return amdgpu_device_mode1_reset(adev); |
620 | } |
621 | } |
622 | |
623 | static bool soc15_supports_baco(struct amdgpu_device *adev) |
624 | { |
625 | 	switch (amdgpu_ip_version(adev, MP1_HWIP, 0)) { |
626 | case IP_VERSION(9, 0, 0): |
627 | case IP_VERSION(11, 0, 2): |
628 | if (adev->asic_type == CHIP_VEGA20) { |
629 | if (adev->psp.sos.fw_version >= 0x80067) |
630 | return amdgpu_dpm_is_baco_supported(adev); |
631 | return false; |
632 | } else { |
633 | return amdgpu_dpm_is_baco_supported(adev); |
634 | } |
635 | break; |
636 | default: |
637 | return false; |
638 | } |
639 | } |
640 | |
641 | /*static int soc15_set_uvd_clock(struct amdgpu_device *adev, u32 clock, |
642 | u32 cntl_reg, u32 status_reg) |
643 | { |
644 | return 0; |
645 | }*/ |
646 | |
647 | static int soc15_set_uvd_clocks(struct amdgpu_device *adev, u32 vclk, u32 dclk) |
648 | { |
649 | /*int r; |
650 | |
651 | r = soc15_set_uvd_clock(adev, vclk, ixCG_VCLK_CNTL, ixCG_VCLK_STATUS); |
652 | if (r) |
653 | return r; |
654 | |
655 | r = soc15_set_uvd_clock(adev, dclk, ixCG_DCLK_CNTL, ixCG_DCLK_STATUS); |
656 | */ |
657 | return 0; |
658 | } |
659 | |
660 | static int soc15_set_vce_clocks(struct amdgpu_device *adev, u32 evclk, u32 ecclk) |
661 | { |
662 | /* todo */ |
663 | |
664 | return 0; |
665 | } |
666 | |
667 | static void soc15_program_aspm(struct amdgpu_device *adev) |
668 | { |
669 | if (!amdgpu_device_should_use_aspm(adev)) |
670 | return; |
671 | |
672 | if (adev->nbio.funcs->program_aspm) |
673 | adev->nbio.funcs->program_aspm(adev); |
674 | } |
675 | |
676 | const struct amdgpu_ip_block_version vega10_common_ip_block = |
677 | { |
678 | .type = AMD_IP_BLOCK_TYPE_COMMON, |
679 | .major = 2, |
680 | .minor = 0, |
681 | .rev = 0, |
682 | .funcs = &soc15_common_ip_funcs, |
683 | }; |
684 | |
685 | static void soc15_reg_base_init(struct amdgpu_device *adev) |
686 | { |
687 | /* Set IP register base before any HW register access */ |
688 | switch (adev->asic_type) { |
689 | case CHIP_VEGA10: |
690 | case CHIP_VEGA12: |
691 | case CHIP_RAVEN: |
692 | case CHIP_RENOIR: |
693 | vega10_reg_base_init(adev); |
694 | break; |
695 | case CHIP_VEGA20: |
696 | vega20_reg_base_init(adev); |
697 | break; |
698 | case CHIP_ARCTURUS: |
699 | arct_reg_base_init(adev); |
700 | break; |
701 | case CHIP_ALDEBARAN: |
702 | aldebaran_reg_base_init(adev); |
703 | break; |
704 | default: |
705 | 		DRM_ERROR("Unsupported asic type: %d!\n", adev->asic_type); |
706 | break; |
707 | } |
708 | } |
709 | |
710 | void soc15_set_virt_ops(struct amdgpu_device *adev) |
711 | { |
712 | adev->virt.ops = &xgpu_ai_virt_ops; |
713 | |
714 | /* init soc15 reg base early enough so we can |
715 | 	 * request full access for sriov before |
716 | * set_ip_blocks. */ |
717 | soc15_reg_base_init(adev); |
718 | } |
719 | |
720 | static bool soc15_need_full_reset(struct amdgpu_device *adev) |
721 | { |
722 | /* change this when we implement soft reset */ |
723 | return true; |
724 | } |
725 | |
726 | static void soc15_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, |
727 | uint64_t *count1) |
728 | { |
729 | uint32_t perfctr = 0; |
730 | uint64_t cnt0_of, cnt1_of; |
731 | int tmp; |
732 | |
733 | /* This reports 0 on APUs, so return to avoid writing/reading registers |
734 | * that may or may not be different from their GPU counterparts |
735 | */ |
736 | if (adev->flags & AMD_IS_APU) |
737 | return; |
738 | |
739 | /* Set the 2 events that we wish to watch, defined above */ |
740 | /* Reg 40 is # received msgs */ |
741 | /* Reg 104 is # of posted requests sent */ |
742 | perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT0_SEL, 40); |
743 | perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK, EVENT1_SEL, 104); |
744 | |
745 | /* Write to enable desired perf counters */ |
746 | WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK, perfctr); |
747 | /* Zero out and enable the perf counters |
748 | * Write 0x5: |
749 | * Bit 0 = Start all counters(1) |
750 | * Bit 2 = Global counter reset enable(1) |
751 | */ |
752 | WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); |
753 | |
754 | 	msleep(1000); |
755 | |
756 | /* Load the shadow and disable the perf counters |
757 | * Write 0x2: |
758 | * Bit 0 = Stop counters(0) |
759 | * Bit 1 = Load the shadow counters(1) |
760 | */ |
761 | WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); |
762 | |
763 | /* Read register values to get any >32bit overflow */ |
764 | tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK); |
765 | cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER0_UPPER); |
766 | cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK, COUNTER1_UPPER); |
767 | |
768 | /* Get the values and add the overflow */ |
769 | *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK) | (cnt0_of << 32); |
770 | *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK) | (cnt1_of << 32); |
771 | } |
772 | |
773 | static void vega20_get_pcie_usage(struct amdgpu_device *adev, uint64_t *count0, |
774 | uint64_t *count1) |
775 | { |
776 | uint32_t perfctr = 0; |
777 | uint64_t cnt0_of, cnt1_of; |
778 | int tmp; |
779 | |
780 | /* This reports 0 on APUs, so return to avoid writing/reading registers |
781 | * that may or may not be different from their GPU counterparts |
782 | */ |
783 | if (adev->flags & AMD_IS_APU) |
784 | return; |
785 | |
786 | /* Set the 2 events that we wish to watch, defined above */ |
787 | /* Reg 40 is # received msgs */ |
788 | /* Reg 108 is # of posted requests sent on VG20 */ |
789 | perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3, |
790 | EVENT0_SEL, 40); |
791 | perfctr = REG_SET_FIELD(perfctr, PCIE_PERF_CNTL_TXCLK3, |
792 | EVENT1_SEL, 108); |
793 | |
794 | /* Write to enable desired perf counters */ |
795 | WREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3, perfctr); |
796 | /* Zero out and enable the perf counters |
797 | * Write 0x5: |
798 | * Bit 0 = Start all counters(1) |
799 | * Bit 2 = Global counter reset enable(1) |
800 | */ |
801 | WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000005); |
802 | |
803 | 	msleep(1000); |
804 | |
805 | /* Load the shadow and disable the perf counters |
806 | * Write 0x2: |
807 | * Bit 0 = Stop counters(0) |
808 | * Bit 1 = Load the shadow counters(1) |
809 | */ |
810 | WREG32_PCIE(smnPCIE_PERF_COUNT_CNTL, 0x00000002); |
811 | |
812 | /* Read register values to get any >32bit overflow */ |
813 | tmp = RREG32_PCIE(smnPCIE_PERF_CNTL_TXCLK3); |
814 | cnt0_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER0_UPPER); |
815 | cnt1_of = REG_GET_FIELD(tmp, PCIE_PERF_CNTL_TXCLK3, COUNTER1_UPPER); |
816 | |
817 | /* Get the values and add the overflow */ |
818 | *count0 = RREG32_PCIE(smnPCIE_PERF_COUNT0_TXCLK3) | (cnt0_of << 32); |
819 | *count1 = RREG32_PCIE(smnPCIE_PERF_COUNT1_TXCLK3) | (cnt1_of << 32); |
820 | } |
821 | |
822 | static bool soc15_need_reset_on_init(struct amdgpu_device *adev) |
823 | { |
824 | u32 sol_reg; |
825 | |
826 | /* CP hangs in IGT reloading test on RN, reset to WA */ |
827 | if (adev->asic_type == CHIP_RENOIR) |
828 | return true; |
829 | |
830 | /* Just return false for soc15 GPUs. Reset does not seem to |
831 | * be necessary. |
832 | */ |
833 | if (!amdgpu_passthrough(adev)) |
834 | return false; |
835 | |
836 | if (adev->flags & AMD_IS_APU) |
837 | return false; |
838 | |
839 | /* Check sOS sign of life register to confirm sys driver and sOS |
840 | 	 * have already been loaded. |
841 | */ |
842 | sol_reg = RREG32_SOC15(MP0, 0, mmMP0_SMN_C2PMSG_81); |
843 | if (sol_reg) |
844 | return true; |
845 | |
846 | return false; |
847 | } |
848 | |
849 | static uint64_t soc15_get_pcie_replay_count(struct amdgpu_device *adev) |
850 | { |
851 | uint64_t nak_r, nak_g; |
852 | |
853 | /* Get the number of NAKs received and generated */ |
854 | nak_r = RREG32_PCIE(smnPCIE_RX_NUM_NAK); |
855 | nak_g = RREG32_PCIE(smnPCIE_RX_NUM_NAK_GENERATED); |
856 | |
857 | 	/* Add the total number of NAKs, i.e. the number of replays */ |
858 | return (nak_r + nak_g); |
859 | } |
860 | |
861 | static void soc15_pre_asic_init(struct amdgpu_device *adev) |
862 | { |
863 | gmc_v9_0_restore_registers(adev); |
864 | } |
865 | |
866 | static const struct amdgpu_asic_funcs soc15_asic_funcs = |
867 | { |
868 | .read_disabled_bios = &soc15_read_disabled_bios, |
869 | .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, |
870 | .read_register = &soc15_read_register, |
871 | .reset = &soc15_asic_reset, |
872 | .reset_method = &soc15_asic_reset_method, |
873 | .get_xclk = &soc15_get_xclk, |
874 | .set_uvd_clocks = &soc15_set_uvd_clocks, |
875 | .set_vce_clocks = &soc15_set_vce_clocks, |
876 | .get_config_memsize = &soc15_get_config_memsize, |
877 | .need_full_reset = &soc15_need_full_reset, |
878 | .init_doorbell_index = &vega10_doorbell_index_init, |
879 | .get_pcie_usage = &soc15_get_pcie_usage, |
880 | .need_reset_on_init = &soc15_need_reset_on_init, |
881 | .get_pcie_replay_count = &soc15_get_pcie_replay_count, |
882 | .supports_baco = &soc15_supports_baco, |
883 | .pre_asic_init = &soc15_pre_asic_init, |
884 | .query_video_codecs = &soc15_query_video_codecs, |
885 | }; |
886 | |
887 | static const struct amdgpu_asic_funcs vega20_asic_funcs = |
888 | { |
889 | .read_disabled_bios = &soc15_read_disabled_bios, |
890 | .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, |
891 | .read_register = &soc15_read_register, |
892 | .reset = &soc15_asic_reset, |
893 | .reset_method = &soc15_asic_reset_method, |
894 | .get_xclk = &soc15_get_xclk, |
895 | .set_uvd_clocks = &soc15_set_uvd_clocks, |
896 | .set_vce_clocks = &soc15_set_vce_clocks, |
897 | .get_config_memsize = &soc15_get_config_memsize, |
898 | .need_full_reset = &soc15_need_full_reset, |
899 | .init_doorbell_index = &vega20_doorbell_index_init, |
900 | .get_pcie_usage = &vega20_get_pcie_usage, |
901 | .need_reset_on_init = &soc15_need_reset_on_init, |
902 | .get_pcie_replay_count = &soc15_get_pcie_replay_count, |
903 | .supports_baco = &soc15_supports_baco, |
904 | .pre_asic_init = &soc15_pre_asic_init, |
905 | .query_video_codecs = &soc15_query_video_codecs, |
906 | }; |
907 | |
908 | static const struct amdgpu_asic_funcs aqua_vanjaram_asic_funcs = |
909 | { |
910 | .read_disabled_bios = &soc15_read_disabled_bios, |
911 | .read_bios_from_rom = &amdgpu_soc15_read_bios_from_rom, |
912 | .read_register = &soc15_read_register, |
913 | .reset = &soc15_asic_reset, |
914 | .reset_method = &soc15_asic_reset_method, |
915 | .get_xclk = &soc15_get_xclk, |
916 | .set_uvd_clocks = &soc15_set_uvd_clocks, |
917 | .set_vce_clocks = &soc15_set_vce_clocks, |
918 | .get_config_memsize = &soc15_get_config_memsize, |
919 | .need_full_reset = &soc15_need_full_reset, |
920 | .init_doorbell_index = &aqua_vanjaram_doorbell_index_init, |
921 | .need_reset_on_init = &soc15_need_reset_on_init, |
922 | .get_pcie_replay_count = &amdgpu_nbio_get_pcie_replay_count, |
923 | .supports_baco = &soc15_supports_baco, |
924 | .pre_asic_init = &soc15_pre_asic_init, |
925 | .query_video_codecs = &soc15_query_video_codecs, |
926 | .encode_ext_smn_addressing = &aqua_vanjaram_encode_ext_smn_addressing, |
927 | .get_reg_state = &aqua_vanjaram_get_reg_state, |
928 | }; |
929 | |
930 | static int soc15_common_early_init(void *handle) |
931 | { |
932 | #define MMIO_REG_HOLE_OFFSET (0x80000 - PAGE_SIZE) |
933 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
934 | |
935 | if (!amdgpu_sriov_vf(adev)) { |
936 | adev->rmmio_remap.reg_offset = MMIO_REG_HOLE_OFFSET; |
937 | adev->rmmio_remap.bus_addr = adev->rmmio_base + MMIO_REG_HOLE_OFFSET; |
938 | } |
939 | adev->smc_rreg = NULL; |
940 | adev->smc_wreg = NULL; |
941 | adev->pcie_rreg = &amdgpu_device_indirect_rreg; |
942 | adev->pcie_wreg = &amdgpu_device_indirect_wreg; |
943 | adev->pcie_rreg_ext = &amdgpu_device_indirect_rreg_ext; |
944 | adev->pcie_wreg_ext = &amdgpu_device_indirect_wreg_ext; |
945 | adev->pcie_rreg64 = &amdgpu_device_indirect_rreg64; |
946 | adev->pcie_wreg64 = &amdgpu_device_indirect_wreg64; |
947 | adev->pcie_rreg64_ext = &amdgpu_device_indirect_rreg64_ext; |
948 | adev->pcie_wreg64_ext = &amdgpu_device_indirect_wreg64_ext; |
949 | adev->uvd_ctx_rreg = &soc15_uvd_ctx_rreg; |
950 | adev->uvd_ctx_wreg = &soc15_uvd_ctx_wreg; |
951 | adev->didt_rreg = &soc15_didt_rreg; |
952 | adev->didt_wreg = &soc15_didt_wreg; |
953 | adev->gc_cac_rreg = &soc15_gc_cac_rreg; |
954 | adev->gc_cac_wreg = &soc15_gc_cac_wreg; |
955 | adev->se_cac_rreg = &soc15_se_cac_rreg; |
956 | adev->se_cac_wreg = &soc15_se_cac_wreg; |
957 | |
958 | adev->rev_id = amdgpu_device_get_rev_id(adev); |
959 | adev->external_rev_id = 0xFF; |
960 | 	/* TODO: split the CG and PG flags based on the IP versions |
961 | 	 * to which they apply. |
962 | */ |
963 | 	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) { |
964 | case IP_VERSION(9, 0, 1): |
965 | adev->asic_funcs = &soc15_asic_funcs; |
966 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
967 | AMD_CG_SUPPORT_GFX_MGLS | |
968 | AMD_CG_SUPPORT_GFX_RLC_LS | |
969 | AMD_CG_SUPPORT_GFX_CP_LS | |
970 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
971 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
972 | AMD_CG_SUPPORT_GFX_CGCG | |
973 | AMD_CG_SUPPORT_GFX_CGLS | |
974 | AMD_CG_SUPPORT_BIF_MGCG | |
975 | AMD_CG_SUPPORT_BIF_LS | |
976 | AMD_CG_SUPPORT_HDP_LS | |
977 | AMD_CG_SUPPORT_DRM_MGCG | |
978 | AMD_CG_SUPPORT_DRM_LS | |
979 | AMD_CG_SUPPORT_ROM_MGCG | |
980 | AMD_CG_SUPPORT_DF_MGCG | |
981 | AMD_CG_SUPPORT_SDMA_MGCG | |
982 | AMD_CG_SUPPORT_SDMA_LS | |
983 | AMD_CG_SUPPORT_MC_MGCG | |
984 | AMD_CG_SUPPORT_MC_LS; |
985 | adev->pg_flags = 0; |
986 | adev->external_rev_id = 0x1; |
987 | break; |
988 | case IP_VERSION(9, 2, 1): |
989 | adev->asic_funcs = &soc15_asic_funcs; |
990 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
991 | AMD_CG_SUPPORT_GFX_MGLS | |
992 | AMD_CG_SUPPORT_GFX_CGCG | |
993 | AMD_CG_SUPPORT_GFX_CGLS | |
994 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
995 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
996 | AMD_CG_SUPPORT_GFX_CP_LS | |
997 | AMD_CG_SUPPORT_MC_LS | |
998 | AMD_CG_SUPPORT_MC_MGCG | |
999 | AMD_CG_SUPPORT_SDMA_MGCG | |
1000 | AMD_CG_SUPPORT_SDMA_LS | |
1001 | AMD_CG_SUPPORT_BIF_MGCG | |
1002 | AMD_CG_SUPPORT_BIF_LS | |
1003 | AMD_CG_SUPPORT_HDP_MGCG | |
1004 | AMD_CG_SUPPORT_HDP_LS | |
1005 | AMD_CG_SUPPORT_ROM_MGCG | |
1006 | AMD_CG_SUPPORT_VCE_MGCG | |
1007 | AMD_CG_SUPPORT_UVD_MGCG; |
1008 | adev->pg_flags = 0; |
1009 | adev->external_rev_id = adev->rev_id + 0x14; |
1010 | break; |
1011 | case IP_VERSION(9, 4, 0): |
1012 | adev->asic_funcs = &vega20_asic_funcs; |
1013 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1014 | AMD_CG_SUPPORT_GFX_MGLS | |
1015 | AMD_CG_SUPPORT_GFX_CGCG | |
1016 | AMD_CG_SUPPORT_GFX_CGLS | |
1017 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
1018 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
1019 | AMD_CG_SUPPORT_GFX_CP_LS | |
1020 | AMD_CG_SUPPORT_MC_LS | |
1021 | AMD_CG_SUPPORT_MC_MGCG | |
1022 | AMD_CG_SUPPORT_SDMA_MGCG | |
1023 | AMD_CG_SUPPORT_SDMA_LS | |
1024 | AMD_CG_SUPPORT_BIF_MGCG | |
1025 | AMD_CG_SUPPORT_BIF_LS | |
1026 | AMD_CG_SUPPORT_HDP_MGCG | |
1027 | AMD_CG_SUPPORT_HDP_LS | |
1028 | AMD_CG_SUPPORT_ROM_MGCG | |
1029 | AMD_CG_SUPPORT_VCE_MGCG | |
1030 | AMD_CG_SUPPORT_UVD_MGCG; |
1031 | adev->pg_flags = 0; |
1032 | adev->external_rev_id = adev->rev_id + 0x28; |
1033 | break; |
1034 | case IP_VERSION(9, 1, 0): |
1035 | case IP_VERSION(9, 2, 2): |
1036 | adev->asic_funcs = &soc15_asic_funcs; |
1037 | |
1038 | if (adev->rev_id >= 0x8) |
1039 | adev->apu_flags |= AMD_APU_IS_RAVEN2; |
1040 | |
1041 | if (adev->apu_flags & AMD_APU_IS_RAVEN2) |
1042 | adev->external_rev_id = adev->rev_id + 0x79; |
1043 | else if (adev->apu_flags & AMD_APU_IS_PICASSO) |
1044 | adev->external_rev_id = adev->rev_id + 0x41; |
1045 | else if (adev->rev_id == 1) |
1046 | adev->external_rev_id = adev->rev_id + 0x20; |
1047 | else |
1048 | adev->external_rev_id = adev->rev_id + 0x01; |
1049 | |
1050 | if (adev->apu_flags & AMD_APU_IS_RAVEN2) { |
1051 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1052 | AMD_CG_SUPPORT_GFX_MGLS | |
1053 | AMD_CG_SUPPORT_GFX_CP_LS | |
1054 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
1055 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
1056 | AMD_CG_SUPPORT_GFX_CGCG | |
1057 | AMD_CG_SUPPORT_GFX_CGLS | |
1058 | AMD_CG_SUPPORT_BIF_LS | |
1059 | AMD_CG_SUPPORT_HDP_LS | |
1060 | AMD_CG_SUPPORT_MC_MGCG | |
1061 | AMD_CG_SUPPORT_MC_LS | |
1062 | AMD_CG_SUPPORT_SDMA_MGCG | |
1063 | AMD_CG_SUPPORT_SDMA_LS | |
1064 | AMD_CG_SUPPORT_VCN_MGCG; |
1065 | |
1066 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
1067 | } else if (adev->apu_flags & AMD_APU_IS_PICASSO) { |
1068 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1069 | AMD_CG_SUPPORT_GFX_MGLS | |
1070 | AMD_CG_SUPPORT_GFX_CP_LS | |
1071 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
1072 | AMD_CG_SUPPORT_GFX_CGCG | |
1073 | AMD_CG_SUPPORT_GFX_CGLS | |
1074 | AMD_CG_SUPPORT_BIF_LS | |
1075 | AMD_CG_SUPPORT_HDP_LS | |
1076 | AMD_CG_SUPPORT_MC_MGCG | |
1077 | AMD_CG_SUPPORT_MC_LS | |
1078 | AMD_CG_SUPPORT_SDMA_MGCG | |
1079 | AMD_CG_SUPPORT_SDMA_LS | |
1080 | AMD_CG_SUPPORT_VCN_MGCG; |
1081 | |
1082 | /* |
1083 | * MMHUB PG needs to be disabled for Picasso for |
1084 | * stability reasons. |
1085 | */ |
1086 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | |
1087 | AMD_PG_SUPPORT_VCN; |
1088 | } else { |
1089 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1090 | AMD_CG_SUPPORT_GFX_MGLS | |
1091 | AMD_CG_SUPPORT_GFX_RLC_LS | |
1092 | AMD_CG_SUPPORT_GFX_CP_LS | |
1093 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
1094 | AMD_CG_SUPPORT_GFX_CGCG | |
1095 | AMD_CG_SUPPORT_GFX_CGLS | |
1096 | AMD_CG_SUPPORT_BIF_MGCG | |
1097 | AMD_CG_SUPPORT_BIF_LS | |
1098 | AMD_CG_SUPPORT_HDP_MGCG | |
1099 | AMD_CG_SUPPORT_HDP_LS | |
1100 | AMD_CG_SUPPORT_DRM_MGCG | |
1101 | AMD_CG_SUPPORT_DRM_LS | |
1102 | AMD_CG_SUPPORT_MC_MGCG | |
1103 | AMD_CG_SUPPORT_MC_LS | |
1104 | AMD_CG_SUPPORT_SDMA_MGCG | |
1105 | AMD_CG_SUPPORT_SDMA_LS | |
1106 | AMD_CG_SUPPORT_VCN_MGCG; |
1107 | |
1108 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | AMD_PG_SUPPORT_VCN; |
1109 | } |
1110 | break; |
1111 | case IP_VERSION(9, 4, 1): |
1112 | adev->asic_funcs = &vega20_asic_funcs; |
1113 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1114 | AMD_CG_SUPPORT_GFX_MGLS | |
1115 | AMD_CG_SUPPORT_GFX_CGCG | |
1116 | AMD_CG_SUPPORT_GFX_CGLS | |
1117 | AMD_CG_SUPPORT_GFX_CP_LS | |
1118 | AMD_CG_SUPPORT_HDP_MGCG | |
1119 | AMD_CG_SUPPORT_HDP_LS | |
1120 | AMD_CG_SUPPORT_SDMA_MGCG | |
1121 | AMD_CG_SUPPORT_SDMA_LS | |
1122 | AMD_CG_SUPPORT_MC_MGCG | |
1123 | AMD_CG_SUPPORT_MC_LS | |
1124 | AMD_CG_SUPPORT_IH_CG | |
1125 | AMD_CG_SUPPORT_VCN_MGCG | |
1126 | AMD_CG_SUPPORT_JPEG_MGCG; |
1127 | adev->pg_flags = AMD_PG_SUPPORT_VCN | AMD_PG_SUPPORT_VCN_DPG; |
1128 | adev->external_rev_id = adev->rev_id + 0x32; |
1129 | break; |
1130 | case IP_VERSION(9, 3, 0): |
1131 | adev->asic_funcs = &soc15_asic_funcs; |
1132 | |
1133 | if (adev->apu_flags & AMD_APU_IS_RENOIR) |
1134 | adev->external_rev_id = adev->rev_id + 0x91; |
1135 | else |
1136 | adev->external_rev_id = adev->rev_id + 0xa1; |
1137 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1138 | AMD_CG_SUPPORT_GFX_MGLS | |
1139 | AMD_CG_SUPPORT_GFX_3D_CGCG | |
1140 | AMD_CG_SUPPORT_GFX_3D_CGLS | |
1141 | AMD_CG_SUPPORT_GFX_CGCG | |
1142 | AMD_CG_SUPPORT_GFX_CGLS | |
1143 | AMD_CG_SUPPORT_GFX_CP_LS | |
1144 | AMD_CG_SUPPORT_MC_MGCG | |
1145 | AMD_CG_SUPPORT_MC_LS | |
1146 | AMD_CG_SUPPORT_SDMA_MGCG | |
1147 | AMD_CG_SUPPORT_SDMA_LS | |
1148 | AMD_CG_SUPPORT_BIF_LS | |
1149 | AMD_CG_SUPPORT_HDP_LS | |
1150 | AMD_CG_SUPPORT_VCN_MGCG | |
1151 | AMD_CG_SUPPORT_JPEG_MGCG | |
1152 | AMD_CG_SUPPORT_IH_CG | |
1153 | AMD_CG_SUPPORT_ATHUB_LS | |
1154 | AMD_CG_SUPPORT_ATHUB_MGCG | |
1155 | AMD_CG_SUPPORT_DF_MGCG; |
1156 | adev->pg_flags = AMD_PG_SUPPORT_SDMA | |
1157 | AMD_PG_SUPPORT_VCN | |
1158 | AMD_PG_SUPPORT_JPEG | |
1159 | AMD_PG_SUPPORT_VCN_DPG; |
1160 | break; |
1161 | case IP_VERSION(9, 4, 2): |
1162 | adev->asic_funcs = &vega20_asic_funcs; |
1163 | adev->cg_flags = AMD_CG_SUPPORT_GFX_MGCG | |
1164 | AMD_CG_SUPPORT_GFX_MGLS | |
1165 | AMD_CG_SUPPORT_GFX_CP_LS | |
1166 | AMD_CG_SUPPORT_HDP_LS | |
1167 | AMD_CG_SUPPORT_SDMA_MGCG | |
1168 | AMD_CG_SUPPORT_SDMA_LS | |
1169 | AMD_CG_SUPPORT_IH_CG | |
1170 | AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG; |
1171 | adev->pg_flags = AMD_PG_SUPPORT_VCN_DPG; |
1172 | adev->external_rev_id = adev->rev_id + 0x3c; |
1173 | break; |
1174 | case IP_VERSION(9, 4, 3): |
1175 | adev->asic_funcs = &aqua_vanjaram_asic_funcs; |
1176 | adev->cg_flags = |
1177 | AMD_CG_SUPPORT_GFX_MGCG | AMD_CG_SUPPORT_GFX_CGCG | |
1178 | AMD_CG_SUPPORT_GFX_CGLS | AMD_CG_SUPPORT_SDMA_MGCG | |
1179 | AMD_CG_SUPPORT_GFX_FGCG | AMD_CG_SUPPORT_REPEATER_FGCG | |
1180 | AMD_CG_SUPPORT_VCN_MGCG | AMD_CG_SUPPORT_JPEG_MGCG | |
1181 | AMD_CG_SUPPORT_IH_CG; |
1182 | adev->pg_flags = |
1183 | AMD_PG_SUPPORT_VCN | |
1184 | AMD_PG_SUPPORT_VCN_DPG | |
1185 | AMD_PG_SUPPORT_JPEG; |
1186 | adev->external_rev_id = adev->rev_id + 0x46; |
1187 | /* GC 9.4.3 uses MMIO register region hole at a different offset */ |
1188 | if (!amdgpu_sriov_vf(adev)) { |
1189 | adev->rmmio_remap.reg_offset = 0x1A000; |
1190 | adev->rmmio_remap.bus_addr = adev->rmmio_base + 0x1A000; |
1191 | } |
1192 | break; |
1193 | default: |
1194 | /* FIXME: not supported yet */ |
1195 | return -EINVAL; |
1196 | } |
1197 | |
1198 | if (amdgpu_sriov_vf(adev)) { |
1199 | amdgpu_virt_init_setting(adev); |
1200 | xgpu_ai_mailbox_set_irq_funcs(adev); |
1201 | } |
1202 | |
1203 | return 0; |
1204 | } |
1205 | |
1206 | static int soc15_common_late_init(void *handle) |
1207 | { |
1208 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1209 | |
1210 | if (amdgpu_sriov_vf(adev)) |
1211 | xgpu_ai_mailbox_get_irq(adev); |
1212 | |
1213 | 	/* Enable selfring doorbell aperture late because the doorbell BAR |
1214 | 	 * aperture will change if BAR resizing succeeds in gmc sw_init. |
1215 | */ |
1216 | adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, true); |
1217 | |
1218 | return 0; |
1219 | } |
1220 | |
1221 | static int soc15_common_sw_init(void *handle) |
1222 | { |
1223 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1224 | |
1225 | if (amdgpu_sriov_vf(adev)) |
1226 | xgpu_ai_mailbox_add_irq_id(adev); |
1227 | |
1228 | if (adev->df.funcs && |
1229 | adev->df.funcs->sw_init) |
1230 | adev->df.funcs->sw_init(adev); |
1231 | |
1232 | return 0; |
1233 | } |
1234 | |
1235 | static int soc15_common_sw_fini(void *handle) |
1236 | { |
1237 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1238 | |
1239 | if (adev->df.funcs && |
1240 | adev->df.funcs->sw_fini) |
1241 | adev->df.funcs->sw_fini(adev); |
1242 | return 0; |
1243 | } |
1244 | |
1245 | static void soc15_sdma_doorbell_range_init(struct amdgpu_device *adev) |
1246 | { |
1247 | int i; |
1248 | |
1249 | 	/* sdma doorbell range is programmed by the hypervisor */ |
1250 | if (!amdgpu_sriov_vf(adev)) { |
1251 | for (i = 0; i < adev->sdma.num_instances; i++) { |
1252 | adev->nbio.funcs->sdma_doorbell_range(adev, i, |
1253 | true, adev->doorbell_index.sdma_engine[i] << 1, |
1254 | adev->doorbell_index.sdma_doorbell_range); |
1255 | } |
1256 | } |
1257 | } |
1258 | |
1259 | static int soc15_common_hw_init(void *handle) |
1260 | { |
1261 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1262 | |
1263 | /* enable aspm */ |
1264 | soc15_program_aspm(adev); |
1265 | /* setup nbio registers */ |
1266 | adev->nbio.funcs->init_registers(adev); |
1267 | /* remap HDP registers to a hole in mmio space, |
1268 | 	 * for the purpose of exposing those registers |
1269 | * to process space |
1270 | */ |
1271 | if (adev->nbio.funcs->remap_hdp_registers && !amdgpu_sriov_vf(adev)) |
1272 | adev->nbio.funcs->remap_hdp_registers(adev); |
1273 | |
1274 | /* enable the doorbell aperture */ |
1275 | adev->nbio.funcs->enable_doorbell_aperture(adev, true); |
1276 | |
1277 | 	/* HW doorbell routing policy: doorbell writes not |
1278 | 	 * in the SDMA/IH/MM/ACV range will be routed to CP. So |
1279 | 	 * we need to init the SDMA doorbell range prior |
1280 | 	 * to CP IP block init and ring test. IH init already |
1281 | 	 * happens before CP. |
1282 | */ |
1283 | soc15_sdma_doorbell_range_init(adev); |
1284 | |
1285 | return 0; |
1286 | } |
1287 | |
1288 | static int soc15_common_hw_fini(void *handle) |
1289 | { |
1290 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1291 | |
1292 | /* Disable the doorbell aperture and selfring doorbell aperture |
1293 | * separately in hw_fini because soc15_enable_doorbell_aperture |
1294 | * has been removed and there is no need to delay disabling |
1295 | * selfring doorbell. |
1296 | */ |
1297 | adev->nbio.funcs->enable_doorbell_aperture(adev, false); |
1298 | adev->nbio.funcs->enable_doorbell_selfring_aperture(adev, false); |
1299 | |
1300 | if (amdgpu_sriov_vf(adev)) |
1301 | xgpu_ai_mailbox_put_irq(adev); |
1302 | |
1303 | if ((!amdgpu_sriov_vf(adev)) && |
1304 | adev->nbio.ras_if && |
1305 | 	    amdgpu_ras_is_supported(adev, adev->nbio.ras_if->block)) { |
1306 | if (adev->nbio.ras && |
1307 | adev->nbio.ras->init_ras_controller_interrupt) |
1308 | 			amdgpu_irq_put(adev, &adev->nbio.ras_controller_irq, 0); |
1309 | if (adev->nbio.ras && |
1310 | adev->nbio.ras->init_ras_err_event_athub_interrupt) |
1311 | 			amdgpu_irq_put(adev, &adev->nbio.ras_err_event_athub_irq, 0); |
1312 | } |
1313 | |
1314 | return 0; |
1315 | } |
1316 | |
1317 | static int soc15_common_suspend(void *handle) |
1318 | { |
1319 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1320 | |
1321 | 	return soc15_common_hw_fini(adev); |
1322 | } |
1323 | |
1324 | static int soc15_common_resume(void *handle) |
1325 | { |
1326 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1327 | |
1328 | if (soc15_need_reset_on_resume(adev)) { |
1329 | 		dev_info(adev->dev, "S3 suspend abort case, let's reset ASIC.\n"); |
1330 | soc15_asic_reset(adev); |
1331 | } |
1332 | 	return soc15_common_hw_init(adev); |
1333 | } |
1334 | |
1335 | static bool soc15_common_is_idle(void *handle) |
1336 | { |
1337 | return true; |
1338 | } |
1339 | |
1340 | static int soc15_common_wait_for_idle(void *handle) |
1341 | { |
1342 | return 0; |
1343 | } |
1344 | |
1345 | static int soc15_common_soft_reset(void *handle) |
1346 | { |
1347 | return 0; |
1348 | } |
1349 | |
1350 | static void soc15_update_drm_clock_gating(struct amdgpu_device *adev, bool enable) |
1351 | { |
1352 | uint32_t def, data; |
1353 | |
1354 | def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); |
1355 | |
1356 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_MGCG)) |
1357 | data &= ~(0x01000000 | |
1358 | 0x02000000 | |
1359 | 0x04000000 | |
1360 | 0x08000000 | |
1361 | 0x10000000 | |
1362 | 0x20000000 | |
1363 | 0x40000000 | |
1364 | 0x80000000); |
1365 | else |
1366 | data |= (0x01000000 | |
1367 | 0x02000000 | |
1368 | 0x04000000 | |
1369 | 0x08000000 | |
1370 | 0x10000000 | |
1371 | 0x20000000 | |
1372 | 0x40000000 | |
1373 | 0x80000000); |
1374 | |
1375 | if (def != data) |
1376 | WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0), data); |
1377 | } |
1378 | |
1379 | static void soc15_update_drm_light_sleep(struct amdgpu_device *adev, bool enable) |
1380 | { |
1381 | uint32_t def, data; |
1382 | |
1383 | def = data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); |
1384 | |
1385 | if (enable && (adev->cg_flags & AMD_CG_SUPPORT_DRM_LS)) |
1386 | data |= 1; |
1387 | else |
1388 | data &= ~1; |
1389 | |
1390 | if (def != data) |
1391 | WREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL), data); |
1392 | } |
1393 | |
1394 | static int soc15_common_set_clockgating_state(void *handle, |
1395 | enum amd_clockgating_state state) |
1396 | { |
1397 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1398 | |
1399 | if (amdgpu_sriov_vf(adev)) |
1400 | return 0; |
1401 | |
1402 | 	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) { |
1403 | case IP_VERSION(6, 1, 0): |
1404 | case IP_VERSION(6, 2, 0): |
1405 | case IP_VERSION(7, 4, 0): |
1406 | adev->nbio.funcs->update_medium_grain_clock_gating(adev, |
1407 | state == AMD_CG_STATE_GATE); |
1408 | adev->nbio.funcs->update_medium_grain_light_sleep(adev, |
1409 | state == AMD_CG_STATE_GATE); |
1410 | adev->hdp.funcs->update_clock_gating(adev, |
1411 | state == AMD_CG_STATE_GATE); |
1412 | soc15_update_drm_clock_gating(adev, |
1413 | 				state == AMD_CG_STATE_GATE); |
1414 | 		soc15_update_drm_light_sleep(adev, |
1415 | 				state == AMD_CG_STATE_GATE); |
1416 | adev->smuio.funcs->update_rom_clock_gating(adev, |
1417 | state == AMD_CG_STATE_GATE); |
1418 | adev->df.funcs->update_medium_grain_clock_gating(adev, |
1419 | state == AMD_CG_STATE_GATE); |
1420 | break; |
1421 | case IP_VERSION(7, 0, 0): |
1422 | case IP_VERSION(7, 0, 1): |
1423 | case IP_VERSION(2, 5, 0): |
1424 | adev->nbio.funcs->update_medium_grain_clock_gating(adev, |
1425 | state == AMD_CG_STATE_GATE); |
1426 | adev->nbio.funcs->update_medium_grain_light_sleep(adev, |
1427 | state == AMD_CG_STATE_GATE); |
1428 | adev->hdp.funcs->update_clock_gating(adev, |
1429 | state == AMD_CG_STATE_GATE); |
1430 | soc15_update_drm_clock_gating(adev, |
1431 | 				state == AMD_CG_STATE_GATE); |
1432 | 		soc15_update_drm_light_sleep(adev, |
1433 | 				state == AMD_CG_STATE_GATE); |
1434 | break; |
1435 | case IP_VERSION(7, 4, 1): |
1436 | case IP_VERSION(7, 4, 4): |
1437 | adev->hdp.funcs->update_clock_gating(adev, |
1438 | state == AMD_CG_STATE_GATE); |
1439 | break; |
1440 | default: |
1441 | break; |
1442 | } |
1443 | return 0; |
1444 | } |
1445 | |
1446 | static void soc15_common_get_clockgating_state(void *handle, u64 *flags) |
1447 | { |
1448 | struct amdgpu_device *adev = (struct amdgpu_device *)handle; |
1449 | int data; |
1450 | |
1451 | if (amdgpu_sriov_vf(adev)) |
1452 | *flags = 0; |
1453 | |
1454 | if (adev->nbio.funcs && adev->nbio.funcs->get_clockgating_state) |
1455 | adev->nbio.funcs->get_clockgating_state(adev, flags); |
1456 | |
1457 | if (adev->hdp.funcs && adev->hdp.funcs->get_clock_gating_state) |
1458 | adev->hdp.funcs->get_clock_gating_state(adev, flags); |
1459 | |
1460 | 	if ((amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 2)) && |
1461 | 	    (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(13, 0, 6))) { |
1462 | /* AMD_CG_SUPPORT_DRM_MGCG */ |
1463 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_CGTT_CTRL0)); |
1464 | if (!(data & 0x01000000)) |
1465 | *flags |= AMD_CG_SUPPORT_DRM_MGCG; |
1466 | |
1467 | /* AMD_CG_SUPPORT_DRM_LS */ |
1468 | data = RREG32(SOC15_REG_OFFSET(MP0, 0, mmMP0_MISC_LIGHT_SLEEP_CTRL)); |
1469 | if (data & 0x1) |
1470 | *flags |= AMD_CG_SUPPORT_DRM_LS; |
1471 | } |
1472 | |
1473 | /* AMD_CG_SUPPORT_ROM_MGCG */ |
1474 | if (adev->smuio.funcs && adev->smuio.funcs->get_clock_gating_state) |
1475 | adev->smuio.funcs->get_clock_gating_state(adev, flags); |
1476 | |
1477 | if (adev->df.funcs && adev->df.funcs->get_clockgating_state) |
1478 | adev->df.funcs->get_clockgating_state(adev, flags); |
1479 | } |
1480 | |
1481 | static int soc15_common_set_powergating_state(void *handle, |
1482 | enum amd_powergating_state state) |
1483 | { |
1484 | /* todo */ |
1485 | return 0; |
1486 | } |
1487 | |
1488 | static const struct amd_ip_funcs soc15_common_ip_funcs = { |
1489 | 	.name = "soc15_common", |
1490 | .early_init = soc15_common_early_init, |
1491 | .late_init = soc15_common_late_init, |
1492 | .sw_init = soc15_common_sw_init, |
1493 | .sw_fini = soc15_common_sw_fini, |
1494 | .hw_init = soc15_common_hw_init, |
1495 | .hw_fini = soc15_common_hw_fini, |
1496 | .suspend = soc15_common_suspend, |
1497 | .resume = soc15_common_resume, |
1498 | .is_idle = soc15_common_is_idle, |
1499 | .wait_for_idle = soc15_common_wait_for_idle, |
1500 | .soft_reset = soc15_common_soft_reset, |
1501 | .set_clockgating_state = soc15_common_set_clockgating_state, |
1502 | .set_powergating_state = soc15_common_set_powergating_state, |
1503 | 	.get_clockgating_state = soc15_common_get_clockgating_state, |
1504 | }; |
1505 | |