1 | /* |
2 | * Copyright 2018 Advanced Micro Devices, Inc. |
3 | * |
4 | * Permission is hereby granted, free of charge, to any person obtaining a |
5 | * copy of this software and associated documentation files (the "Software"), |
6 | * to deal in the Software without restriction, including without limitation |
7 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
8 | * and/or sell copies of the Software, and to permit persons to whom the |
9 | * Software is furnished to do so, subject to the following conditions: |
10 | * |
11 | * The above copyright notice and this permission notice shall be included in |
12 | * all copies or substantial portions of the Software. |
13 | * |
14 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
15 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
16 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
17 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
18 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
19 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
20 | * OTHER DEALINGS IN THE SOFTWARE. |
21 | * |
22 | * |
23 | */ |
24 | #include <linux/debugfs.h> |
25 | #include <linux/list.h> |
26 | #include <linux/module.h> |
27 | #include <linux/uaccess.h> |
28 | #include <linux/reboot.h> |
29 | #include <linux/syscalls.h> |
30 | #include <linux/pm_runtime.h> |
31 | #include <linux/list_sort.h> |
32 | |
33 | #include "amdgpu.h" |
34 | #include "amdgpu_ras.h" |
35 | #include "amdgpu_atomfirmware.h" |
36 | #include "amdgpu_xgmi.h" |
37 | #include "ivsrcid/nbio/irqsrcs_nbif_7_4.h" |
38 | #include "nbio_v4_3.h" |
39 | #include "nbio_v7_9.h" |
40 | #include "atom.h" |
41 | #include "amdgpu_reset.h" |
42 | #include "amdgpu_psp.h" |
43 | |
44 | #ifdef CONFIG_X86_MCE_AMD |
45 | #include <asm/mce.h> |
46 | |
47 | static bool notifier_registered; |
48 | #endif |
static const char *RAS_FS_NAME = "ras";
50 | |
const char *ras_error_string[] = {
	"none",
	"parity",
	"single_correctable",
	"multi_uncorrectable",
	"poison",
};
58 | |
const char *ras_block_string[] = {
	"umc",
	"sdma",
	"gfx",
	"mmhub",
	"athub",
	"pcie_bif",
	"hdp",
	"xgmi_wafl",
	"df",
	"smn",
	"sem",
	"mp0",
	"mp1",
	"fuse",
	"mca",
	"vcn",
	"jpeg",
	"ih",
	"mpio",
};

const char *ras_mca_block_string[] = {
	"mca_mp0",
	"mca_mp1",
	"mca_mpio",
	"mca_iohc",
};
87 | |
88 | struct amdgpu_ras_block_list { |
89 | /* ras block link */ |
90 | struct list_head node; |
91 | |
92 | struct amdgpu_ras_block_object *ras_obj; |
93 | }; |
94 | |
const char *get_ras_block_str(struct ras_common_if *ras_block)
{
	if (!ras_block)
		return "NULL";

	if (ras_block->block >= AMDGPU_RAS_BLOCK_COUNT ||
	    ras_block->block >= ARRAY_SIZE(ras_block_string))
		return "OUT OF RANGE";

	if (ras_block->block == AMDGPU_RAS_BLOCK__MCA)
		return ras_mca_block_string[ras_block->sub_block_index];

	return ras_block_string[ras_block->block];
}
109 | |
110 | #define ras_block_str(_BLOCK_) \ |
111 | (((_BLOCK_) < ARRAY_SIZE(ras_block_string)) ? ras_block_string[_BLOCK_] : "Out Of Range") |
112 | |
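/* Map an error-type bit mask to its name (assumes a single error bit is set). */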
113 | #define ras_err_str(i) (ras_error_string[ffs(i)]) |
114 | |
115 | #define RAS_DEFAULT_FLAGS (AMDGPU_RAS_FLAG_INIT_BY_VBIOS) |
116 | |
117 | /* inject address is 52 bits */ |
118 | #define RAS_UMC_INJECT_ADDR_LIMIT (0x1ULL << 52) |
119 | |
120 | /* typical ECC bad page rate is 1 bad page per 100MB VRAM */ |
121 | #define RAS_BAD_PAGE_COVER (100 * 1024 * 1024ULL) |
122 | |
123 | #define MAX_UMC_POISON_POLLING_TIME_ASYNC 100 //ms |
124 | |
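/* These states surface as "R", "P" and "F" respectively in the
 * gpu_vram_bad_pages sysfs file; see amdgpu_ras_badpage_flags_str().
 */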
125 | enum amdgpu_ras_retire_page_reservation { |
126 | AMDGPU_RAS_RETIRE_PAGE_RESERVED, |
127 | AMDGPU_RAS_RETIRE_PAGE_PENDING, |
128 | AMDGPU_RAS_RETIRE_PAGE_FAULT, |
129 | }; |
130 | |
131 | atomic_t amdgpu_ras_in_intr = ATOMIC_INIT(0); |
132 | |
133 | static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, |
134 | uint64_t addr); |
135 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, |
136 | uint64_t addr); |
137 | #ifdef CONFIG_X86_MCE_AMD |
138 | static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev); |
139 | struct mce_notifier_adev_list { |
140 | struct amdgpu_device *devs[MAX_GPU_INSTANCE]; |
141 | int num_gpu; |
142 | }; |
143 | static struct mce_notifier_adev_list mce_adev_list; |
144 | #endif |
145 | |
146 | void amdgpu_ras_set_error_query_ready(struct amdgpu_device *adev, bool ready) |
147 | { |
148 | if (adev && amdgpu_ras_get_context(adev)) |
149 | amdgpu_ras_get_context(adev)->error_query_ready = ready; |
150 | } |
151 | |
152 | static bool amdgpu_ras_get_error_query_ready(struct amdgpu_device *adev) |
153 | { |
154 | if (adev && amdgpu_ras_get_context(adev)) |
155 | return amdgpu_ras_get_context(adev)->error_query_ready; |
156 | |
157 | return false; |
158 | } |
159 | |
static int amdgpu_reserve_page_direct(struct amdgpu_device *adev, uint64_t address)
{
	struct ras_err_data err_data;
	struct eeprom_table_record err_rec;
	int ret;

	if ((address >= adev->gmc.mc_vram_size) ||
	    (address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
		dev_warn(adev->dev,
			 "RAS WARN: input address 0x%llx is invalid.\n",
			 address);
		return -EINVAL;
	}

	if (amdgpu_ras_check_bad_page(adev, address)) {
		dev_warn(adev->dev,
			 "RAS WARN: 0x%llx has already been marked as bad page!\n",
			 address);
		return 0;
	}

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	memset(&err_rec, 0x0, sizeof(struct eeprom_table_record));
	err_data.err_addr = &err_rec;
	amdgpu_umc_fill_error_record(&err_data, address, address, 0, 0);

	if (amdgpu_bad_page_threshold != 0) {
		amdgpu_ras_add_bad_pages(adev, err_data.err_addr,
					 err_data.err_addr_cnt);
		amdgpu_ras_save_bad_pages(adev, NULL);
	}

	amdgpu_ras_error_data_fini(&err_data);

	dev_warn(adev->dev, "WARNING: THIS IS ONLY FOR TEST PURPOSES AND WILL CORRUPT RAS EEPROM\n");
	dev_warn(adev->dev, "Clear EEPROM:\n");
	dev_warn(adev->dev, "		echo 1 > /sys/kernel/debug/dri/0/ras/ras_eeprom_reset\n");

	return 0;
}
203 | |
static ssize_t amdgpu_ras_debugfs_read(struct file *f, char __user *buf,
				       size_t size, loff_t *pos)
{
	struct ras_manager *obj = (struct ras_manager *)file_inode(f)->i_private;
	struct ras_query_if info = {
		.head = obj->head,
	};
	ssize_t s;
	char val[128];

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	/* Hardware counter will be reset automatically after the query on Vega20 and Arcturus */
	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	s = snprintf(val, sizeof(val), "%s: %lu\n%s: %lu\n",
		     "ue", info.ue_count,
		     "ce", info.ce_count);
	if (*pos >= s)
		return 0;

	s -= *pos;
	s = min_t(u64, s, size);

	if (copy_to_user(buf, &val[*pos], s))
		return -EINVAL;

	*pos += s;

	return s;
}
241 | |
242 | static const struct file_operations amdgpu_ras_debugfs_ops = { |
243 | .owner = THIS_MODULE, |
244 | .read = amdgpu_ras_debugfs_read, |
245 | .write = NULL, |
246 | .llseek = default_llseek |
247 | }; |
248 | |
249 | static int amdgpu_ras_find_block_id_by_name(const char *name, int *block_id) |
250 | { |
251 | int i; |
252 | |
253 | for (i = 0; i < ARRAY_SIZE(ras_block_string); i++) { |
254 | *block_id = i; |
255 | if (strcmp(name, ras_block_string[i]) == 0) |
256 | return 0; |
257 | } |
258 | return -EINVAL; |
259 | } |
260 | |
static int amdgpu_ras_debugfs_ctrl_parse_data(struct file *f,
		const char __user *buf, size_t size,
		loff_t *pos, struct ras_debug_if *data)
{
	ssize_t s = min_t(u64, 64, size);
	char str[65];
	char block_name[33];
	char err[9] = "ue";
	int op = -1;
	int block_id;
	uint32_t sub_block;
	u64 address, value;
	/* default value is 0 if the mask is not set by user */
	u32 instance_mask = 0;

	if (*pos)
		return -EINVAL;
	*pos = size;

	memset(str, 0, sizeof(str));
	memset(data, 0, sizeof(*data));

	if (copy_from_user(str, buf, s))
		return -EINVAL;

	if (sscanf(str, "disable %32s", block_name) == 1)
		op = 0;
	else if (sscanf(str, "enable %32s %8s", block_name, err) == 2)
		op = 1;
	else if (sscanf(str, "inject %32s %8s", block_name, err) == 2)
		op = 2;
	else if (strstr(str, "retire_page") != NULL)
		op = 3;
	else if (str[0] && str[1] && str[2] && str[3])
		/* ascii string, but commands are not matched. */
		return -EINVAL;

	if (op != -1) {
		if (op == 3) {
			if (sscanf(str, "%*s 0x%llx", &address) != 1 &&
			    sscanf(str, "%*s %llu", &address) != 1)
				return -EINVAL;

			data->op = op;
			data->inject.address = address;

			return 0;
		}

		if (amdgpu_ras_find_block_id_by_name(block_name, &block_id))
			return -EINVAL;

		data->head.block = block_id;
		/* only ue, ce and poison errors are supported */
		if (!memcmp("ue", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		else if (!memcmp("ce", err, 2))
			data->head.type = AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE;
		else if (!memcmp("poison", err, 6))
			data->head.type = AMDGPU_RAS_ERROR__POISON;
		else
			return -EINVAL;

		data->op = op;

		if (op == 2) {
			if (sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx 0x%x",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu %u",
				   &sub_block, &address, &value, &instance_mask) != 4 &&
			    sscanf(str, "%*s %*s %*s 0x%x 0x%llx 0x%llx",
				   &sub_block, &address, &value) != 3 &&
			    sscanf(str, "%*s %*s %*s %u %llu %llu",
				   &sub_block, &address, &value) != 3)
				return -EINVAL;
			data->head.sub_block_index = sub_block;
			data->inject.address = address;
			data->inject.value = value;
			data->inject.instance_mask = instance_mask;
		}
	} else {
		if (size < sizeof(*data))
			return -EINVAL;

		if (copy_from_user(data, buf, sizeof(*data)))
			return -EINVAL;
	}

	return 0;
}
351 | |
352 | static void amdgpu_ras_instance_mask_check(struct amdgpu_device *adev, |
353 | struct ras_debug_if *data) |
354 | { |
355 | int num_xcc = adev->gfx.xcc_mask ? NUM_XCC(adev->gfx.xcc_mask) : 1; |
356 | uint32_t mask, inst_mask = data->inject.instance_mask; |
357 | |
358 | /* no need to set instance mask if there is only one instance */ |
359 | if (num_xcc <= 1 && inst_mask) { |
360 | data->inject.instance_mask = 0; |
		dev_dbg(adev->dev,
			"RAS inject mask (0x%x) is not supported, forcing it to 0.\n",
			inst_mask);
364 | |
365 | return; |
366 | } |
367 | |
368 | switch (data->head.block) { |
369 | case AMDGPU_RAS_BLOCK__GFX: |
370 | mask = GENMASK(num_xcc - 1, 0); |
371 | break; |
372 | case AMDGPU_RAS_BLOCK__SDMA: |
373 | mask = GENMASK(adev->sdma.num_instances - 1, 0); |
374 | break; |
375 | case AMDGPU_RAS_BLOCK__VCN: |
376 | case AMDGPU_RAS_BLOCK__JPEG: |
377 | mask = GENMASK(adev->vcn.num_vcn_inst - 1, 0); |
378 | break; |
379 | default: |
380 | mask = inst_mask; |
381 | break; |
382 | } |
383 | |
384 | /* remove invalid bits in instance mask */ |
385 | data->inject.instance_mask &= mask; |
386 | if (inst_mask != data->inject.instance_mask) |
		dev_dbg(adev->dev,
			"Adjust RAS inject mask 0x%x to 0x%x\n",
			inst_mask, data->inject.instance_mask);
390 | } |
391 | |
392 | /** |
393 | * DOC: AMDGPU RAS debugfs control interface |
394 | * |
395 | * The control interface accepts struct ras_debug_if which has two members. |
396 | * |
397 | * First member: ras_debug_if::head or ras_debug_if::inject. |
398 | * |
399 | * head is used to indicate which IP block will be under control. |
400 | * |
401 | * head has four members, they are block, type, sub_block_index, name. |
402 | * block: which IP will be under control. |
403 | * type: what kind of error will be enabled/disabled/injected. |
 * sub_block_index: some IPs have subcomponents, e.g., GFX, SDMA.
405 | * name: the name of IP. |
406 | * |
407 | * inject has three more members than head, they are address, value and mask. |
408 | * As their names indicate, inject operation will write the |
409 | * value to the address. |
410 | * |
411 | * The second member: struct ras_debug_if::op. |
412 | * It has three kinds of operations. |
413 | * |
414 | * - 0: disable RAS on the block. Take ::head as its data. |
415 | * - 1: enable RAS on the block. Take ::head as its data. |
416 | * - 2: inject errors on the block. Take ::inject as its data. |
417 | * |
418 | * How to use the interface? |
419 | * |
420 | * In a program |
421 | * |
422 | * Copy the struct ras_debug_if in your code and initialize it. |
423 | * Write the struct to the control interface. |
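 *
 * A minimal sketch (assuming the struct and enum definitions have been
 * copied into the program, card 0, and no error handling) could look like:
 *
 * .. code-block:: c
 *
 *	struct ras_debug_if data = {0};
 *	int fd = open("/sys/kernel/debug/dri/0/ras/ras_ctrl", O_WRONLY);
 *
 *	data.head.block = AMDGPU_RAS_BLOCK__UMC;
 *	data.head.type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
 *	data.op = 1;	// 1 == enable RAS on the block
 *
 *	write(fd, &data, sizeof(data));	// the raw struct is accepted as-is
 *	close(fd);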
424 | * |
425 | * From shell |
426 | * |
427 | * .. code-block:: bash |
428 | * |
429 | * echo "disable <block>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl |
430 | * echo "enable <block> <error>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl |
431 | * echo "inject <block> <error> <sub-block> <address> <value> <mask>" > /sys/kernel/debug/dri/<N>/ras/ras_ctrl |
432 | * |
 * Where <N> is the index of the card you want to affect.
434 | * |
435 | * "disable" requires only the block. |
436 | * "enable" requires the block and error type. |
437 | * "inject" requires the block, error type, address, and value. |
438 | * |
439 | * The block is one of: umc, sdma, gfx, etc. |
440 | * see ras_block_string[] for details |
441 | * |
442 | * The error type is one of: ue, ce and poison where, |
443 | * ue is multi-uncorrectable |
444 | * ce is single-correctable |
445 | * poison is poison |
446 | * |
 * The sub-block is the sub-block index; pass 0 if there is no sub-block.
 * The address and value are hexadecimal numbers, leading 0x is optional.
 * The mask is the instance mask; it is optional and defaults to 0x1.
450 | * |
451 | * For instance, |
452 | * |
453 | * .. code-block:: bash |
454 | * |
455 | * echo inject umc ue 0x0 0x0 0x0 > /sys/kernel/debug/dri/0/ras/ras_ctrl |
456 | * echo inject umc ce 0 0 0 3 > /sys/kernel/debug/dri/0/ras/ras_ctrl |
457 | * echo disable umc > /sys/kernel/debug/dri/0/ras/ras_ctrl |
458 | * |
459 | * How to check the result of the operation? |
460 | * |
461 | * To check disable/enable, see "ras" features at, |
462 | * /sys/class/drm/card[0/1/2...]/device/ras/features |
463 | * |
464 | * To check inject, see the corresponding error count at, |
465 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx|sdma|umc|...]_err_count |
466 | * |
467 | * .. note:: |
468 | * Operations are only allowed on blocks which are supported. |
469 | * Check the "ras" mask at /sys/module/amdgpu/parameters/ras_mask |
470 | * to see which blocks support RAS on a particular asic. |
471 | * |
472 | */ |
473 | static ssize_t amdgpu_ras_debugfs_ctrl_write(struct file *f, |
474 | const char __user *buf, |
475 | size_t size, loff_t *pos) |
476 | { |
477 | struct amdgpu_device *adev = (struct amdgpu_device *)file_inode(f)->i_private; |
478 | struct ras_debug_if data; |
479 | int ret = 0; |
480 | |
	if (!amdgpu_ras_get_error_query_ready(adev)) {
		dev_warn(adev->dev,
			 "RAS WARN: error injection currently inaccessible\n");
		return size;
	}

	ret = amdgpu_ras_debugfs_ctrl_parse_data(f, buf, size, pos, &data);
	if (ret)
		return ret;

	if (data.op == 3) {
		ret = amdgpu_reserve_page_direct(adev, data.inject.address);
		if (!ret)
			return size;
		else
			return ret;
	}

	if (!amdgpu_ras_is_supported(adev, data.head.block))
		return -EINVAL;

	switch (data.op) {
	case 0:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 0);
		break;
	case 1:
		ret = amdgpu_ras_feature_enable(adev, &data.head, 1);
		break;
	case 2:
		if ((data.inject.address >= adev->gmc.mc_vram_size &&
		    adev->gmc.mc_vram_size) ||
		    (data.inject.address >= RAS_UMC_INJECT_ADDR_LIMIT)) {
			dev_warn(adev->dev,
				 "RAS WARN: input address 0x%llx is invalid.",
				 data.inject.address);
			ret = -EINVAL;
			break;
		}

		/* umc ce/ue error injection for a bad page is not allowed */
		if ((data.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    amdgpu_ras_check_bad_page(adev, data.inject.address)) {
			dev_warn(adev->dev,
				 "RAS WARN: inject: 0x%llx has already been marked as bad!\n",
				 data.inject.address);
			break;
		}

		amdgpu_ras_instance_mask_check(adev, &data);

		/* data.inject.address is offset instead of absolute gpu address */
		ret = amdgpu_ras_error_inject(adev, &data.inject);
		break;
534 | default: |
535 | ret = -EINVAL; |
536 | break; |
537 | } |
538 | |
539 | if (ret) |
540 | return ret; |
541 | |
542 | return size; |
543 | } |
544 | |
545 | /** |
546 | * DOC: AMDGPU RAS debugfs EEPROM table reset interface |
547 | * |
 * Some boards contain an EEPROM which is used to persistently store a list of
 * bad pages which experienced ECC errors in vram. This interface provides
 * a way to reset the EEPROM, e.g., after testing error injection.
551 | * |
552 | * Usage: |
553 | * |
554 | * .. code-block:: bash |
555 | * |
556 | * echo 1 > ../ras/ras_eeprom_reset |
557 | * |
558 | * will reset EEPROM table to 0 entries. |
559 | * |
560 | */ |
561 | static ssize_t amdgpu_ras_debugfs_eeprom_write(struct file *f, |
562 | const char __user *buf, |
563 | size_t size, loff_t *pos) |
564 | { |
565 | struct amdgpu_device *adev = |
566 | (struct amdgpu_device *)file_inode(f)->i_private; |
567 | int ret; |
568 | |
	ret = amdgpu_ras_eeprom_reset_table(
		&(amdgpu_ras_get_context(adev)->eeprom_control));
571 | |
572 | if (!ret) { |
573 | /* Something was written to EEPROM. |
574 | */ |
575 | amdgpu_ras_get_context(adev)->flags = RAS_DEFAULT_FLAGS; |
576 | return size; |
577 | } else { |
578 | return ret; |
579 | } |
580 | } |
581 | |
582 | static const struct file_operations amdgpu_ras_debugfs_ctrl_ops = { |
583 | .owner = THIS_MODULE, |
584 | .read = NULL, |
585 | .write = amdgpu_ras_debugfs_ctrl_write, |
586 | .llseek = default_llseek |
587 | }; |
588 | |
589 | static const struct file_operations amdgpu_ras_debugfs_eeprom_ops = { |
590 | .owner = THIS_MODULE, |
591 | .read = NULL, |
592 | .write = amdgpu_ras_debugfs_eeprom_write, |
593 | .llseek = default_llseek |
594 | }; |
595 | |
596 | /** |
597 | * DOC: AMDGPU RAS sysfs Error Count Interface |
598 | * |
599 | * It allows the user to read the error count for each IP block on the gpu through |
600 | * /sys/class/drm/card[0/1/2...]/device/ras/[gfx/sdma/...]_err_count |
601 | * |
 * It outputs multiple lines which report the uncorrected (ue) and corrected
 * (ce) error counts.
604 | * |
605 | * The format of one line is below, |
606 | * |
607 | * [ce|ue]: count |
608 | * |
609 | * Example: |
610 | * |
611 | * .. code-block:: bash |
612 | * |
613 | * ue: 0 |
614 | * ce: 1 |
615 | * |
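 * From a program, the same file can simply be read and parsed; a minimal
 * sketch (assuming card0 and the umc block, error handling omitted):
 *
 * .. code-block:: c
 *
 *	char val[64] = {0};
 *	int fd = open("/sys/class/drm/card0/device/ras/umc_err_count", O_RDONLY);
 *
 *	read(fd, val, sizeof(val) - 1);	// e.g. "ue: 0\nce: 1\n"
 *	close(fd);
 *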
616 | */ |
617 | static ssize_t amdgpu_ras_sysfs_read(struct device *dev, |
618 | struct device_attribute *attr, char *buf) |
619 | { |
620 | struct ras_manager *obj = container_of(attr, struct ras_manager, sysfs_attr); |
621 | struct ras_query_if info = { |
622 | .head = obj->head, |
623 | }; |
624 | |
	if (!amdgpu_ras_get_error_query_ready(obj->adev))
		return sysfs_emit(buf, "Query currently inaccessible\n");

	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	if (amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(obj->adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(obj->adev, info.head.block))
			dev_warn(obj->adev->dev, "Failed to reset error counter and error status");
	}

	if (info.head.block == AMDGPU_RAS_BLOCK__UMC)
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count, "de", info.de_count);
	else
		return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
				  "ce", info.ce_count);
643 | } |
644 | |
645 | /* obj begin */ |
646 | |
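/* Simple use-count helpers: an obj is "alive" while its use count is
 * non-zero, and put_obj() tears it down once the count drops back to zero.
 */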
647 | #define get_obj(obj) do { (obj)->use++; } while (0) |
648 | #define alive_obj(obj) ((obj)->use) |
649 | |
650 | static inline void put_obj(struct ras_manager *obj) |
651 | { |
	if (obj && (--obj->use == 0)) {
		list_del(&obj->node);
		amdgpu_ras_error_data_fini(&obj->err_data);
	}

	if (obj && (obj->use < 0))
		DRM_ERROR("RAS ERROR: Unbalanced obj(%s) use\n", get_ras_block_str(&obj->head));
659 | } |
660 | |
661 | /* make one obj and return it. */ |
662 | static struct ras_manager *amdgpu_ras_create_obj(struct amdgpu_device *adev, |
663 | struct ras_common_if *head) |
664 | { |
665 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
666 | struct ras_manager *obj; |
667 | |
668 | if (!adev->ras_enabled || !con) |
669 | return NULL; |
670 | |
671 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) |
672 | return NULL; |
673 | |
674 | if (head->block == AMDGPU_RAS_BLOCK__MCA) { |
675 | if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) |
676 | return NULL; |
677 | |
678 | obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; |
679 | } else |
680 | obj = &con->objs[head->block]; |
681 | |
	/* already exists, return obj? */
683 | if (alive_obj(obj)) |
684 | return NULL; |
685 | |
	if (amdgpu_ras_error_data_init(&obj->err_data))
687 | return NULL; |
688 | |
689 | obj->head = *head; |
690 | obj->adev = adev; |
	list_add(&obj->node, &con->head);
692 | get_obj(obj); |
693 | |
694 | return obj; |
695 | } |
696 | |
697 | /* return an obj equal to head, or the first when head is NULL */ |
698 | struct ras_manager *amdgpu_ras_find_obj(struct amdgpu_device *adev, |
699 | struct ras_common_if *head) |
700 | { |
701 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
702 | struct ras_manager *obj; |
703 | int i; |
704 | |
705 | if (!adev->ras_enabled || !con) |
706 | return NULL; |
707 | |
708 | if (head) { |
709 | if (head->block >= AMDGPU_RAS_BLOCK_COUNT) |
710 | return NULL; |
711 | |
712 | if (head->block == AMDGPU_RAS_BLOCK__MCA) { |
713 | if (head->sub_block_index >= AMDGPU_RAS_MCA_BLOCK__LAST) |
714 | return NULL; |
715 | |
716 | obj = &con->objs[AMDGPU_RAS_BLOCK__LAST + head->sub_block_index]; |
717 | } else |
718 | obj = &con->objs[head->block]; |
719 | |
720 | if (alive_obj(obj)) |
721 | return obj; |
722 | } else { |
723 | for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT + AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { |
724 | obj = &con->objs[i]; |
725 | if (alive_obj(obj)) |
726 | return obj; |
727 | } |
728 | } |
729 | |
730 | return NULL; |
731 | } |
732 | /* obj end */ |
733 | |
734 | /* feature ctl begin */ |
735 | static int amdgpu_ras_is_feature_allowed(struct amdgpu_device *adev, |
736 | struct ras_common_if *head) |
737 | { |
738 | return adev->ras_hw_enabled & BIT(head->block); |
739 | } |
740 | |
741 | static int amdgpu_ras_is_feature_enabled(struct amdgpu_device *adev, |
742 | struct ras_common_if *head) |
743 | { |
744 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
745 | |
746 | return con->features & BIT(head->block); |
747 | } |
748 | |
749 | /* |
750 | * if obj is not created, then create one. |
751 | * set feature enable flag. |
752 | */ |
753 | static int __amdgpu_ras_feature_enable(struct amdgpu_device *adev, |
754 | struct ras_common_if *head, int enable) |
755 | { |
756 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
757 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); |
758 | |
	/* If the hardware does not support ras, then do not create the obj.
	 * But if the hardware does support ras, we can create the obj.
	 * The ras framework checks con->hw_supported to see if it needs to do
	 * the corresponding initialization.
	 * An IP checks con->support to see if it needs to disable ras.
	 */
765 | if (!amdgpu_ras_is_feature_allowed(adev, head)) |
766 | return 0; |
767 | |
768 | if (enable) { |
769 | if (!obj) { |
770 | obj = amdgpu_ras_create_obj(adev, head); |
771 | if (!obj) |
772 | return -EINVAL; |
773 | } else { |
774 | /* In case we create obj somewhere else */ |
775 | get_obj(obj); |
776 | } |
777 | con->features |= BIT(head->block); |
778 | } else { |
779 | if (obj && amdgpu_ras_is_feature_enabled(adev, head)) { |
780 | con->features &= ~BIT(head->block); |
781 | put_obj(obj); |
782 | } |
783 | } |
784 | |
785 | return 0; |
786 | } |
787 | |
788 | /* wrapper of psp_ras_enable_features */ |
789 | int amdgpu_ras_feature_enable(struct amdgpu_device *adev, |
790 | struct ras_common_if *head, bool enable) |
791 | { |
792 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
793 | union ta_ras_cmd_input *info; |
794 | int ret; |
795 | |
796 | if (!con) |
797 | return -EINVAL; |
798 | |
799 | /* For non-gfx ip, do not enable ras feature if it is not allowed */ |
800 | /* For gfx ip, regardless of feature support status, */ |
801 | /* Force issue enable or disable ras feature commands */ |
802 | if (head->block != AMDGPU_RAS_BLOCK__GFX && |
803 | !amdgpu_ras_is_feature_allowed(adev, head)) |
804 | return 0; |
805 | |
806 | /* Only enable gfx ras feature from host side */ |
807 | if (head->block == AMDGPU_RAS_BLOCK__GFX && |
808 | !amdgpu_sriov_vf(adev) && |
809 | !amdgpu_ras_intr_triggered()) { |
		info = kzalloc(sizeof(union ta_ras_cmd_input), GFP_KERNEL);
		if (!info)
			return -ENOMEM;

		if (!enable) {
			info->disable_features = (struct ta_ras_disable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		} else {
			info->enable_features = (struct ta_ras_enable_features_input) {
				.block_id = amdgpu_ras_block_to_ta(head->block),
				.error_type = amdgpu_ras_error_to_ta(head->type),
			};
		}

		ret = psp_ras_enable_features(&adev->psp, info, enable);
		if (ret) {
			dev_err(adev->dev, "ras %s %s failed poison:%d ret:%d\n",
				enable ? "enable" : "disable",
				get_ras_block_str(head),
				amdgpu_ras_is_poison_mode_supported(adev), ret);
			kfree(info);
			return ret;
		}

		kfree(info);
837 | } |
838 | |
839 | /* setup the obj */ |
840 | __amdgpu_ras_feature_enable(adev, head, enable); |
841 | |
842 | return 0; |
843 | } |
844 | |
845 | /* Only used in device probe stage and called only once. */ |
846 | int amdgpu_ras_feature_enable_on_boot(struct amdgpu_device *adev, |
847 | struct ras_common_if *head, bool enable) |
848 | { |
849 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
850 | int ret; |
851 | |
852 | if (!con) |
853 | return -EINVAL; |
854 | |
855 | if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) { |
856 | if (enable) { |
			/* There is no harm in issuing a ras TA cmd regardless of
			 * the current ras state.
			 * If current state == target state, it will do nothing.
			 * But sometimes it requests the driver to reset and repost
			 * with error code -EAGAIN.
			 */
			ret = amdgpu_ras_feature_enable(adev, head, 1);
			/* With an old ras TA, we might fail to enable ras.
			 * Log it and just set up the object.
			 * TODO: remove this workaround in the future.
			 */
			if (ret == -EINVAL) {
				ret = __amdgpu_ras_feature_enable(adev, head, 1);
				if (!ret)
					dev_info(adev->dev,
						 "RAS INFO: %s setup object\n",
						 get_ras_block_str(head));
			}
		} else {
			/* setup the object then issue a ras TA disable cmd.*/
			ret = __amdgpu_ras_feature_enable(adev, head, 1);
			if (ret)
				return ret;

			/* gfx block ras disable cmd must be sent to the ras-ta */
			if (head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features |= BIT(head->block);

			ret = amdgpu_ras_feature_enable(adev, head, 0);

			/* clean gfx block ras features flag */
			if (adev->ras_enabled && head->block == AMDGPU_RAS_BLOCK__GFX)
				con->features &= ~BIT(head->block);
		}
	} else
		ret = amdgpu_ras_feature_enable(adev, head, enable);
893 | |
894 | return ret; |
895 | } |
896 | |
897 | static int amdgpu_ras_disable_all_features(struct amdgpu_device *adev, |
898 | bool bypass) |
899 | { |
900 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
901 | struct ras_manager *obj, *tmp; |
902 | |
903 | list_for_each_entry_safe(obj, tmp, &con->head, node) { |
904 | /* bypass psp. |
905 | * aka just release the obj and corresponding flags |
906 | */ |
		if (bypass) {
			if (__amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &obj->head, 0))
				break;
913 | } |
914 | } |
915 | |
916 | return con->features; |
917 | } |
918 | |
919 | static int amdgpu_ras_enable_all_features(struct amdgpu_device *adev, |
920 | bool bypass) |
921 | { |
922 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
923 | int i; |
924 | const enum amdgpu_ras_error_type default_ras_type = AMDGPU_RAS_ERROR__NONE; |
925 | |
926 | for (i = 0; i < AMDGPU_RAS_BLOCK_COUNT; i++) { |
927 | struct ras_common_if head = { |
928 | .block = i, |
929 | .type = default_ras_type, |
930 | .sub_block_index = 0, |
931 | }; |
932 | |
933 | if (i == AMDGPU_RAS_BLOCK__MCA) |
934 | continue; |
935 | |
		if (bypass) {
			/*
			 * bypass psp. the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
945 | break; |
946 | } |
947 | } |
948 | |
949 | for (i = 0; i < AMDGPU_RAS_MCA_BLOCK_COUNT; i++) { |
950 | struct ras_common_if head = { |
951 | .block = AMDGPU_RAS_BLOCK__MCA, |
952 | .type = default_ras_type, |
953 | .sub_block_index = i, |
954 | }; |
955 | |
		if (bypass) {
			/*
			 * bypass psp. the vbios enables ras for us,
			 * so just create the obj
			 */
			if (__amdgpu_ras_feature_enable(adev, &head, 1))
				break;
		} else {
			if (amdgpu_ras_feature_enable(adev, &head, 1))
965 | break; |
966 | } |
967 | } |
968 | |
969 | return con->features; |
970 | } |
971 | /* feature ctl end */ |
972 | |
973 | static int amdgpu_ras_block_match_default(struct amdgpu_ras_block_object *block_obj, |
974 | enum amdgpu_ras_block block) |
975 | { |
976 | if (!block_obj) |
977 | return -EINVAL; |
978 | |
979 | if (block_obj->ras_comm.block == block) |
980 | return 0; |
981 | |
982 | return -EINVAL; |
983 | } |
984 | |
985 | static struct amdgpu_ras_block_object *amdgpu_ras_get_ras_block(struct amdgpu_device *adev, |
986 | enum amdgpu_ras_block block, uint32_t sub_block_index) |
987 | { |
988 | struct amdgpu_ras_block_list *node, *tmp; |
989 | struct amdgpu_ras_block_object *obj; |
990 | |
991 | if (block >= AMDGPU_RAS_BLOCK__LAST) |
992 | return NULL; |
993 | |
994 | list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { |
995 | if (!node->ras_obj) { |
996 | dev_warn(adev->dev, "Warning: abnormal ras list node.\n" ); |
997 | continue; |
998 | } |
999 | |
1000 | obj = node->ras_obj; |
1001 | if (obj->ras_block_match) { |
1002 | if (obj->ras_block_match(obj, block, sub_block_index) == 0) |
1003 | return obj; |
1004 | } else { |
			if (amdgpu_ras_block_match_default(obj, block) == 0)
1006 | return obj; |
1007 | } |
1008 | } |
1009 | |
1010 | return NULL; |
1011 | } |
1012 | |
1013 | static void amdgpu_ras_get_ecc_info(struct amdgpu_device *adev, struct ras_err_data *err_data) |
1014 | { |
1015 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
1016 | int ret = 0; |
1017 | |
	/*
	 * Choose the right query method according to
	 * whether the smu supports querying error information
	 */
	ret = amdgpu_dpm_get_ecc_info(adev, (void *)&(ras->umc_ecc));
1023 | if (ret == -EOPNOTSUPP) { |
1024 | if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && |
1025 | adev->umc.ras->ras_block.hw_ops->query_ras_error_count) |
1026 | adev->umc.ras->ras_block.hw_ops->query_ras_error_count(adev, err_data); |
1027 | |
1028 | /* umc query_ras_error_address is also responsible for clearing |
1029 | * error status |
1030 | */ |
1031 | if (adev->umc.ras && adev->umc.ras->ras_block.hw_ops && |
1032 | adev->umc.ras->ras_block.hw_ops->query_ras_error_address) |
1033 | adev->umc.ras->ras_block.hw_ops->query_ras_error_address(adev, err_data); |
1034 | } else if (!ret) { |
1035 | if (adev->umc.ras && |
1036 | adev->umc.ras->ecc_info_query_ras_error_count) |
1037 | adev->umc.ras->ecc_info_query_ras_error_count(adev, err_data); |
1038 | |
1039 | if (adev->umc.ras && |
1040 | adev->umc.ras->ecc_info_query_ras_error_address) |
1041 | adev->umc.ras->ecc_info_query_ras_error_address(adev, err_data); |
1042 | } |
1043 | } |
1044 | |
1045 | static void amdgpu_ras_error_print_error_data(struct amdgpu_device *adev, |
1046 | struct ras_manager *ras_mgr, |
1047 | struct ras_err_data *err_data, |
1048 | const char *blk_name, |
1049 | bool is_ue, |
1050 | bool is_de) |
1051 | { |
1052 | struct amdgpu_smuio_mcm_config_info *mcm_info; |
1053 | struct ras_err_node *err_node; |
1054 | struct ras_err_info *err_info; |
1055 | |
	if (is_ue) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			if (err_info->ue_count) {
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld new uncorrectable hardware errors detected in %s block\n",
					 mcm_info->socket_id,
					 mcm_info->die_id,
					 err_info->ue_count,
					 blk_name);
			}
		}

		for_each_ras_error(err_node, &ras_mgr->err_data) {
			err_info = &err_node->err_info;
			mcm_info = &err_info->mcm_info;
			dev_info(adev->dev, "socket: %d, die: %d, "
				 "%lld uncorrectable hardware errors detected in total in %s block\n",
				 mcm_info->socket_id, mcm_info->die_id, err_info->ue_count, blk_name);
		}

	} else {
		if (is_de) {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->de_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						 "%lld new deferred hardware errors detected in %s block\n",
						 mcm_info->socket_id,
						 mcm_info->die_id,
						 err_info->de_count,
						 blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld deferred hardware errors detected in total in %s block\n",
					 mcm_info->socket_id, mcm_info->die_id,
					 err_info->de_count, blk_name);
			}
		} else {
			for_each_ras_error(err_node, err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				if (err_info->ce_count) {
					dev_info(adev->dev, "socket: %d, die: %d, "
						 "%lld new correctable hardware errors detected in %s block\n",
						 mcm_info->socket_id,
						 mcm_info->die_id,
						 err_info->ce_count,
						 blk_name);
				}
			}

			for_each_ras_error(err_node, &ras_mgr->err_data) {
				err_info = &err_node->err_info;
				mcm_info = &err_info->mcm_info;
				dev_info(adev->dev, "socket: %d, die: %d, "
					 "%lld correctable hardware errors detected in total in %s block\n",
					 mcm_info->socket_id, mcm_info->die_id,
					 err_info->ce_count, blk_name);
			}
		}
	}
1125 | } |
1126 | |
1127 | static inline bool err_data_has_source_info(struct ras_err_data *data) |
1128 | { |
	return !list_empty(&data->err_node_list);
1130 | } |
1131 | |
1132 | static void amdgpu_ras_error_generate_report(struct amdgpu_device *adev, |
1133 | struct ras_query_if *query_if, |
1134 | struct ras_err_data *err_data) |
1135 | { |
	struct ras_manager *ras_mgr = amdgpu_ras_find_obj(adev, &query_if->head);
	const char *blk_name = get_ras_block_str(&query_if->head);

	if (err_data->ce_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld correctable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ce_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld correctable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ce_count,
				 blk_name);
		}
	}

	if (err_data->ue_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, true, false);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.ue_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld uncorrectable hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.ue_count,
				 blk_name);
		}
	}

	if (err_data->de_count) {
		if (err_data_has_source_info(err_data)) {
			amdgpu_ras_error_print_error_data(adev, ras_mgr, err_data,
							  blk_name, false, true);
		} else if (!adev->aid_mask &&
			   adev->smuio.funcs &&
			   adev->smuio.funcs->get_socket_id &&
			   adev->smuio.funcs->get_die_id) {
			dev_info(adev->dev, "socket: %d, die: %d "
				 "%ld deferred hardware errors "
				 "detected in %s block\n",
				 adev->smuio.funcs->get_socket_id(adev),
				 adev->smuio.funcs->get_die_id(adev),
				 ras_mgr->err_data.de_count,
				 blk_name);
		} else {
			dev_info(adev->dev, "%ld deferred hardware errors "
				 "detected in %s block\n",
				 ras_mgr->err_data.de_count,
				 blk_name);
		}
	}
1207 | } |
1208 | |
1209 | static void amdgpu_rasmgr_error_data_statistic_update(struct ras_manager *obj, struct ras_err_data *err_data) |
1210 | { |
1211 | struct ras_err_node *err_node; |
1212 | struct ras_err_info *err_info; |
1213 | |
	if (err_data_has_source_info(err_data)) {
		for_each_ras_error(err_node, err_data) {
			err_info = &err_node->err_info;
			amdgpu_ras_error_statistic_de_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->de_count);
			amdgpu_ras_error_statistic_ce_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ce_count);
			amdgpu_ras_error_statistic_ue_count(&obj->err_data,
					&err_info->mcm_info, NULL, err_info->ue_count);
		}
	} else {
		/* for legacy asic path which doesn't have error source info */
		obj->err_data.ue_count += err_data->ue_count;
		obj->err_data.ce_count += err_data->ce_count;
		obj->err_data.de_count += err_data->de_count;
1229 | } |
1230 | } |
1231 | |
1232 | static struct ras_manager *get_ras_manager(struct amdgpu_device *adev, enum amdgpu_ras_block blk) |
1233 | { |
1234 | struct ras_common_if head; |
1235 | |
1236 | memset(&head, 0, sizeof(head)); |
1237 | head.block = blk; |
1238 | |
	return amdgpu_ras_find_obj(adev, &head);
1240 | } |
1241 | |
1242 | int amdgpu_ras_bind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk, |
1243 | const struct aca_info *aca_info, void *data) |
1244 | { |
1245 | struct ras_manager *obj; |
1246 | |
1247 | obj = get_ras_manager(adev, blk); |
1248 | if (!obj) |
1249 | return -EINVAL; |
1250 | |
	return amdgpu_aca_add_handle(adev, &obj->aca_handle, ras_block_str(blk), aca_info, data);
1252 | } |
1253 | |
1254 | int amdgpu_ras_unbind_aca(struct amdgpu_device *adev, enum amdgpu_ras_block blk) |
1255 | { |
1256 | struct ras_manager *obj; |
1257 | |
1258 | obj = get_ras_manager(adev, blk); |
1259 | if (!obj) |
1260 | return -EINVAL; |
1261 | |
	amdgpu_aca_remove_handle(&obj->aca_handle);
1263 | |
1264 | return 0; |
1265 | } |
1266 | |
1267 | static int amdgpu_aca_log_ras_error_data(struct amdgpu_device *adev, enum amdgpu_ras_block blk, |
1268 | enum aca_error_type type, struct ras_err_data *err_data) |
1269 | { |
1270 | struct ras_manager *obj; |
1271 | |
1272 | obj = get_ras_manager(adev, blk); |
1273 | if (!obj) |
1274 | return -EINVAL; |
1275 | |
	return amdgpu_aca_get_error_data(adev, &obj->aca_handle, type, err_data);
1277 | } |
1278 | |
1279 | ssize_t amdgpu_ras_aca_sysfs_read(struct device *dev, struct device_attribute *attr, |
1280 | struct aca_handle *handle, char *buf, void *data) |
1281 | { |
1282 | struct ras_manager *obj = container_of(handle, struct ras_manager, aca_handle); |
1283 | struct ras_query_if info = { |
1284 | .head = obj->head, |
1285 | }; |
1286 | |
	if (amdgpu_ras_query_error_status(obj->adev, &info))
		return -EINVAL;

	return sysfs_emit(buf, "%s: %lu\n%s: %lu\n", "ue", info.ue_count,
			  "ce", info.ce_count);
1292 | } |
1293 | |
1294 | static int amdgpu_ras_query_error_status_helper(struct amdgpu_device *adev, |
1295 | struct ras_query_if *info, |
1296 | struct ras_err_data *err_data, |
1297 | unsigned int error_query_mode) |
1298 | { |
1299 | enum amdgpu_ras_block blk = info ? info->head.block : AMDGPU_RAS_BLOCK_COUNT; |
1300 | struct amdgpu_ras_block_object *block_obj = NULL; |
1301 | int ret; |
1302 | |
1303 | if (blk == AMDGPU_RAS_BLOCK_COUNT) |
1304 | return -EINVAL; |
1305 | |
1306 | if (error_query_mode == AMDGPU_RAS_INVALID_ERROR_QUERY) |
1307 | return -EINVAL; |
1308 | |
1309 | if (error_query_mode == AMDGPU_RAS_DIRECT_ERROR_QUERY) { |
1310 | if (info->head.block == AMDGPU_RAS_BLOCK__UMC) { |
1311 | amdgpu_ras_get_ecc_info(adev, err_data); |
1312 | } else { |
			block_obj = amdgpu_ras_get_ras_block(adev, info->head.block, 0);
			if (!block_obj || !block_obj->hw_ops) {
				dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
					     get_ras_block_str(&info->head));
1317 | return -EINVAL; |
1318 | } |
1319 | |
1320 | if (block_obj->hw_ops->query_ras_error_count) |
1321 | block_obj->hw_ops->query_ras_error_count(adev, err_data); |
1322 | |
1323 | if ((info->head.block == AMDGPU_RAS_BLOCK__SDMA) || |
1324 | (info->head.block == AMDGPU_RAS_BLOCK__GFX) || |
1325 | (info->head.block == AMDGPU_RAS_BLOCK__MMHUB)) { |
1326 | if (block_obj->hw_ops->query_ras_error_status) |
1327 | block_obj->hw_ops->query_ras_error_status(adev); |
1328 | } |
1329 | } |
1330 | } else { |
1331 | if (amdgpu_aca_is_enabled(adev)) { |
			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_UE, err_data);
			if (ret)
				return ret;

			ret = amdgpu_aca_log_ras_error_data(adev, blk, ACA_ERROR_TYPE_CE, err_data);
			if (ret)
				return ret;
		} else {
			/* FIXME: add code to check return value later */
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_UE, err_data);
			amdgpu_mca_smu_log_ras_error(adev, blk, AMDGPU_MCA_ERROR_TYPE_CE, err_data);
1343 | } |
1344 | } |
1345 | |
1346 | return 0; |
1347 | } |
1348 | |
1349 | /* query/inject/cure begin */ |
1350 | int amdgpu_ras_query_error_status(struct amdgpu_device *adev, struct ras_query_if *info) |
1351 | { |
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_err_data err_data;
	unsigned int error_query_mode;
	int ret;

	if (!obj)
		return -EINVAL;

	ret = amdgpu_ras_error_data_init(&err_data);
	if (ret)
		return ret;

	if (!amdgpu_ras_get_error_query_mode(adev, &error_query_mode)) {
		ret = -EINVAL;
		goto out_fini_err_data;
	}

	ret = amdgpu_ras_query_error_status_helper(adev, info,
						   &err_data,
						   error_query_mode);
	if (ret)
		goto out_fini_err_data;

	amdgpu_rasmgr_error_data_statistic_update(obj, &err_data);

	info->ue_count = obj->err_data.ue_count;
	info->ce_count = obj->err_data.ce_count;
	info->de_count = obj->err_data.de_count;

	amdgpu_ras_error_generate_report(adev, info, &err_data);

out_fini_err_data:
	amdgpu_ras_error_data_fini(&err_data);
1383 | |
1384 | return ret; |
1385 | } |
1386 | |
1387 | int amdgpu_ras_reset_error_count(struct amdgpu_device *adev, |
1388 | enum amdgpu_ras_block block) |
1389 | { |
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1391 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
1392 | const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; |
1393 | const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; |
1394 | struct amdgpu_hive_info *hive; |
1395 | int hive_ras_recovery = 0; |
1396 | |
1397 | if (!block_obj || !block_obj->hw_ops) { |
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     ras_block_str(block));
1400 | return -EOPNOTSUPP; |
1401 | } |
1402 | |
1403 | if (!amdgpu_ras_is_supported(adev, block) || |
1404 | !amdgpu_ras_get_aca_debug_mode(adev)) |
1405 | return -EOPNOTSUPP; |
1406 | |
1407 | hive = amdgpu_get_xgmi_hive(adev); |
1408 | if (hive) { |
		hive_ras_recovery = atomic_read(&hive->ras_recovery);
1410 | amdgpu_put_xgmi_hive(hive); |
1411 | } |
1412 | |
1413 | /* skip ras error reset in gpu reset */ |
	if ((amdgpu_in_reset(adev) || atomic_read(&ras->in_recovery) ||
1415 | hive_ras_recovery) && |
1416 | ((smu_funcs && smu_funcs->set_debug_mode) || |
1417 | (mca_funcs && mca_funcs->mca_set_debug_mode))) |
1418 | return -EOPNOTSUPP; |
1419 | |
1420 | if (block_obj->hw_ops->reset_ras_error_count) |
1421 | block_obj->hw_ops->reset_ras_error_count(adev); |
1422 | |
1423 | return 0; |
1424 | } |
1425 | |
1426 | int amdgpu_ras_reset_error_status(struct amdgpu_device *adev, |
1427 | enum amdgpu_ras_block block) |
1428 | { |
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev, block, 0);
1430 | |
1431 | if (amdgpu_ras_reset_error_count(adev, block) == -EOPNOTSUPP) |
1432 | return 0; |
1433 | |
1434 | if ((block == AMDGPU_RAS_BLOCK__GFX) || |
1435 | (block == AMDGPU_RAS_BLOCK__MMHUB)) { |
1436 | if (block_obj->hw_ops->reset_ras_error_status) |
1437 | block_obj->hw_ops->reset_ras_error_status(adev); |
1438 | } |
1439 | |
1440 | return 0; |
1441 | } |
1442 | |
1443 | /* wrapper of psp_ras_trigger_error */ |
1444 | int amdgpu_ras_error_inject(struct amdgpu_device *adev, |
1445 | struct ras_inject_if *info) |
1446 | { |
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ta_ras_trigger_error_input block_info = {
		.block_id = amdgpu_ras_block_to_ta(info->head.block),
		.inject_error_type = amdgpu_ras_error_to_ta(info->head.type),
		.sub_block_index = info->head.sub_block_index,
		.address = info->address,
		.value = info->value,
	};
	int ret = -EINVAL;
	struct amdgpu_ras_block_object *block_obj = amdgpu_ras_get_ras_block(adev,
							info->head.block,
							info->head.sub_block_index);
1459 | |
1460 | /* inject on guest isn't allowed, return success directly */ |
1461 | if (amdgpu_sriov_vf(adev)) |
1462 | return 0; |
1463 | |
1464 | if (!obj) |
1465 | return -EINVAL; |
1466 | |
	if (!block_obj || !block_obj->hw_ops) {
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
		return -EINVAL;
	}

	/* Calculate XGMI relative offset */
	if (adev->gmc.xgmi.num_physical_nodes > 1 &&
	    info->head.block != AMDGPU_RAS_BLOCK__GFX) {
		block_info.address =
			amdgpu_xgmi_get_relative_phy_addr(adev,
							  block_info.address);
	}

	if (block_obj->hw_ops->ras_error_inject) {
		if (info->head.block == AMDGPU_RAS_BLOCK__GFX)
			ret = block_obj->hw_ops->ras_error_inject(adev, info, info->instance_mask);
		else /* Special ras_error_inject is defined (e.g: xgmi) */
			ret = block_obj->hw_ops->ras_error_inject(adev, &block_info,
								  info->instance_mask);
	} else {
		/* default path */
		ret = psp_ras_trigger_error(&adev->psp, &block_info, info->instance_mask);
	}

	if (ret)
		dev_err(adev->dev, "ras inject %s failed %d\n",
			get_ras_block_str(&info->head), ret);
1495 | |
1496 | return ret; |
1497 | } |
1498 | |
1499 | /** |
1500 | * amdgpu_ras_query_error_count_helper -- Get error counter for specific IP |
1501 | * @adev: pointer to AMD GPU device |
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable errors.
 * @query_info: pointer to ras_query_if
 *
 * Return 0 on success or if there is nothing to do; otherwise return an
 * error on failure.
1508 | */ |
1509 | static int amdgpu_ras_query_error_count_helper(struct amdgpu_device *adev, |
1510 | unsigned long *ce_count, |
1511 | unsigned long *ue_count, |
1512 | struct ras_query_if *query_info) |
1513 | { |
1514 | int ret; |
1515 | |
1516 | if (!query_info) |
1517 | /* do nothing if query_info is not specified */ |
1518 | return 0; |
1519 | |
	ret = amdgpu_ras_query_error_status(adev, query_info);
1521 | if (ret) |
1522 | return ret; |
1523 | |
1524 | *ce_count += query_info->ce_count; |
1525 | *ue_count += query_info->ue_count; |
1526 | |
	/* some hardware/IP supports read to clear,
	 * so no need to explicitly reset the err status after the query call */
	if (amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 2) &&
	    amdgpu_ip_version(adev, MP0_HWIP, 0) != IP_VERSION(11, 0, 4)) {
		if (amdgpu_ras_reset_error_status(adev, query_info->head.block))
			dev_warn(adev->dev,
				 "Failed to reset error counter and error status\n");
1534 | } |
1535 | |
1536 | return 0; |
1537 | } |
1538 | |
1539 | /** |
1540 | * amdgpu_ras_query_error_count -- Get error counts of all IPs or specific IP |
1541 | * @adev: pointer to AMD GPU device |
 * @ce_count: pointer to an integer to be set to the count of correctable errors.
 * @ue_count: pointer to an integer to be set to the count of uncorrectable
 * errors.
 * @query_info: pointer to ras_query_if if the query request is only for a
 * specific ip block; if info is NULL, then the query request is for
 * all the ip blocks that support querying ras error counters/status
 *
 * If @ce_count or @ue_count is set, count and return the corresponding
 * error counts through those integer pointers. Return 0 if the device
1551 | * supports RAS. Return -EOPNOTSUPP if the device doesn't support RAS. |
1552 | */ |
1553 | int amdgpu_ras_query_error_count(struct amdgpu_device *adev, |
1554 | unsigned long *ce_count, |
1555 | unsigned long *ue_count, |
1556 | struct ras_query_if *query_info) |
1557 | { |
1558 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1559 | struct ras_manager *obj; |
1560 | unsigned long ce, ue; |
	int ret = 0;
1562 | |
1563 | if (!adev->ras_enabled || !con) |
1564 | return -EOPNOTSUPP; |
1565 | |
1566 | /* Don't count since no reporting. |
1567 | */ |
1568 | if (!ce_count && !ue_count) |
1569 | return 0; |
1570 | |
1571 | ce = 0; |
1572 | ue = 0; |
1573 | if (!query_info) { |
1574 | /* query all the ip blocks that support ras query interface */ |
1575 | list_for_each_entry(obj, &con->head, node) { |
1576 | struct ras_query_if info = { |
1577 | .head = obj->head, |
1578 | }; |
1579 | |
			ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, &info);
1581 | } |
1582 | } else { |
1583 | /* query specific ip block */ |
		ret = amdgpu_ras_query_error_count_helper(adev, &ce, &ue, query_info);
1585 | } |
1586 | |
1587 | if (ret) |
1588 | return ret; |
1589 | |
1590 | if (ce_count) |
1591 | *ce_count = ce; |
1592 | |
1593 | if (ue_count) |
1594 | *ue_count = ue; |
1595 | |
1596 | return 0; |
1597 | } |
1598 | /* query/inject/cure end */ |
1599 | |
1600 | |
1601 | /* sysfs begin */ |
1602 | |
1603 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, |
1604 | struct ras_badpage **bps, unsigned int *count); |
1605 | |
1606 | static char *amdgpu_ras_badpage_flags_str(unsigned int flags) |
1607 | { |
	switch (flags) {
	case AMDGPU_RAS_RETIRE_PAGE_RESERVED:
		return "R";
	case AMDGPU_RAS_RETIRE_PAGE_PENDING:
		return "P";
	case AMDGPU_RAS_RETIRE_PAGE_FAULT:
	default:
		return "F";
	}
1617 | } |
1618 | |
1619 | /** |
1620 | * DOC: AMDGPU RAS sysfs gpu_vram_bad_pages Interface |
1621 | * |
 * It allows the user to read the bad pages of vram on the gpu through
1623 | * /sys/class/drm/card[0/1/2...]/device/ras/gpu_vram_bad_pages |
1624 | * |
1625 | * It outputs multiple lines, and each line stands for one gpu page. |
1626 | * |
1627 | * The format of one line is below, |
1628 | * gpu pfn : gpu page size : flags |
1629 | * |
1630 | * gpu pfn and gpu page size are printed in hex format. |
1631 | * flags can be one of below character, |
1632 | * |
1633 | * R: reserved, this gpu page is reserved and not able to use. |
1634 | * |
1635 | * P: pending for reserve, this gpu page is marked as bad, will be reserved |
1636 | * in next window of page_reserve. |
1637 | * |
1638 | * F: unable to reserve. this gpu page can't be reserved due to some reasons. |
1639 | * |
1640 | * Examples: |
1641 | * |
1642 | * .. code-block:: bash |
1643 | * |
1644 | * 0x00000001 : 0x00001000 : R |
1645 | * 0x00000002 : 0x00001000 : P |
1646 | * |
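 * The list can be read back with a plain read; the card index below is
 * illustrative and depends on the system:
 *
 * .. code-block:: bash
 *
 *	cat /sys/class/drm/card0/device/ras/gpu_vram_bad_pages
 *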
1647 | */ |
1648 | |
1649 | static ssize_t amdgpu_ras_sysfs_badpages_read(struct file *f, |
1650 | struct kobject *kobj, struct bin_attribute *attr, |
1651 | char *buf, loff_t ppos, size_t count) |
1652 | { |
1653 | struct amdgpu_ras *con = |
1654 | container_of(attr, struct amdgpu_ras, badpages_attr); |
1655 | struct amdgpu_device *adev = con->adev; |
	const unsigned int element_size =
		sizeof("0xabcdabcd : 0x12345678 : R\n") - 1;
	unsigned int start = div64_ul(ppos + element_size - 1, element_size);
	unsigned int end = div64_ul(ppos + count - 1, element_size);
	ssize_t s = 0;
	struct ras_badpage *bps = NULL;
	unsigned int bps_count = 0;

	memset(buf, 0, count);

	if (amdgpu_ras_badpages_read(adev, &bps, &bps_count))
		return 0;

	for (; start < end && start < bps_count; start++)
		s += scnprintf(&buf[s], element_size + 1,
			       "0x%08x : 0x%08x : %1s\n",
			       bps[start].bp,
			       bps[start].size,
			       amdgpu_ras_badpage_flags_str(bps[start].flags));

	kfree(bps);

	return s;
1679 | } |
1680 | |
1681 | static ssize_t amdgpu_ras_sysfs_features_read(struct device *dev, |
1682 | struct device_attribute *attr, char *buf) |
1683 | { |
1684 | struct amdgpu_ras *con = |
1685 | container_of(attr, struct amdgpu_ras, features_attr); |
1686 | |
	return sysfs_emit(buf, "feature mask: 0x%x\n", con->features);
1688 | } |
1689 | |
1690 | static ssize_t amdgpu_ras_sysfs_version_show(struct device *dev, |
1691 | struct device_attribute *attr, char *buf) |
1692 | { |
1693 | struct amdgpu_ras *con = |
1694 | container_of(attr, struct amdgpu_ras, version_attr); |
	return sysfs_emit(buf, "table version: 0x%x\n", con->eeprom_control.tbl_hdr.version);
1696 | } |
1697 | |
1698 | static ssize_t amdgpu_ras_sysfs_schema_show(struct device *dev, |
1699 | struct device_attribute *attr, char *buf) |
1700 | { |
1701 | struct amdgpu_ras *con = |
1702 | container_of(attr, struct amdgpu_ras, schema_attr); |
	return sysfs_emit(buf, "schema: 0x%x\n", con->schema);
1704 | } |
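
/*
 * The attributes above are exposed in the "ras" sysfs group of the device.
 * An illustrative read (the card index depends on the system):
 *
 *	cat /sys/class/drm/card0/device/ras/features
 *	cat /sys/class/drm/card0/device/ras/version
 *	cat /sys/class/drm/card0/device/ras/schema
 */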
1705 | |
1706 | static void amdgpu_ras_sysfs_remove_bad_page_node(struct amdgpu_device *adev) |
1707 | { |
1708 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1709 | |
1710 | if (adev->dev->kobj.sd) |
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &con->badpages_attr.attr,
					     RAS_FS_NAME);
1714 | } |
1715 | |
1716 | static int amdgpu_ras_sysfs_remove_dev_attr_node(struct amdgpu_device *adev) |
1717 | { |
1718 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1719 | struct attribute *attrs[] = { |
1720 | &con->features_attr.attr, |
1721 | &con->version_attr.attr, |
1722 | &con->schema_attr.attr, |
1723 | NULL |
1724 | }; |
1725 | struct attribute_group group = { |
1726 | .name = RAS_FS_NAME, |
1727 | .attrs = attrs, |
1728 | }; |
1729 | |
1730 | if (adev->dev->kobj.sd) |
		sysfs_remove_group(&adev->dev->kobj, &group);
1732 | |
1733 | return 0; |
1734 | } |
1735 | |
1736 | int amdgpu_ras_sysfs_create(struct amdgpu_device *adev, |
1737 | struct ras_common_if *head) |
1738 | { |
1739 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); |
1740 | |
1741 | if (!obj || obj->attr_inuse) |
1742 | return -EINVAL; |
1743 | |
1744 | get_obj(obj); |
1745 | |
	snprintf(obj->fs_data.sysfs_name, sizeof(obj->fs_data.sysfs_name),
		 "%s_err_count", head->name);
1748 | |
1749 | obj->sysfs_attr = (struct device_attribute){ |
1750 | .attr = { |
1751 | .name = obj->fs_data.sysfs_name, |
1752 | .mode = S_IRUGO, |
1753 | }, |
1754 | .show = amdgpu_ras_sysfs_read, |
1755 | }; |
1756 | sysfs_attr_init(&obj->sysfs_attr.attr); |
1757 | |
	if (sysfs_add_file_to_group(&adev->dev->kobj,
				    &obj->sysfs_attr.attr,
				    RAS_FS_NAME)) {
1761 | put_obj(obj); |
1762 | return -EINVAL; |
1763 | } |
1764 | |
1765 | obj->attr_inuse = 1; |
1766 | |
1767 | return 0; |
1768 | } |
1769 | |
1770 | int amdgpu_ras_sysfs_remove(struct amdgpu_device *adev, |
1771 | struct ras_common_if *head) |
1772 | { |
1773 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); |
1774 | |
1775 | if (!obj || !obj->attr_inuse) |
1776 | return -EINVAL; |
1777 | |
1778 | if (adev->dev->kobj.sd) |
		sysfs_remove_file_from_group(&adev->dev->kobj,
					     &obj->sysfs_attr.attr,
					     RAS_FS_NAME);
1782 | obj->attr_inuse = 0; |
1783 | put_obj(obj); |
1784 | |
1785 | return 0; |
1786 | } |
1787 | |
1788 | static int amdgpu_ras_sysfs_remove_all(struct amdgpu_device *adev) |
1789 | { |
1790 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1791 | struct ras_manager *obj, *tmp; |
1792 | |
1793 | list_for_each_entry_safe(obj, tmp, &con->head, node) { |
		amdgpu_ras_sysfs_remove(adev, &obj->head);
1795 | } |
1796 | |
1797 | if (amdgpu_bad_page_threshold != 0) |
1798 | amdgpu_ras_sysfs_remove_bad_page_node(adev); |
1799 | |
1800 | amdgpu_ras_sysfs_remove_dev_attr_node(adev); |
1801 | |
1802 | return 0; |
1803 | } |
1804 | /* sysfs end */ |
1805 | |
1806 | /** |
1807 | * DOC: AMDGPU RAS Reboot Behavior for Unrecoverable Errors |
1808 | * |
 * Normally when there is an uncorrectable error, the driver will reset
 * the GPU to recover. However, in the event of an unrecoverable error,
 * the driver provides an interface to reboot the system automatically.
1813 | * |
1814 | * The following file in debugfs provides that interface: |
1815 | * /sys/kernel/debug/dri/[0/1/2...]/ras/auto_reboot |
1816 | * |
1817 | * Usage: |
1818 | * |
1819 | * .. code-block:: bash |
1820 | * |
1821 | * echo true > .../ras/auto_reboot |
1822 | * |
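 * The node is a debugfs bool, so writing "false" (or "0") turns the
 * automatic reboot back off:
 *
 * .. code-block:: bash
 *
 *	echo false > .../ras/auto_reboot
 *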
1823 | */ |
1824 | /* debugfs begin */ |
1825 | static struct dentry *amdgpu_ras_debugfs_create_ctrl_node(struct amdgpu_device *adev) |
1826 | { |
1827 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1828 | struct amdgpu_ras_eeprom_control *eeprom = &con->eeprom_control; |
1829 | struct drm_minor *minor = adev_to_drm(adev)->primary; |
1830 | struct dentry *dir; |
1831 | |
	dir = debugfs_create_dir(RAS_FS_NAME, minor->debugfs_root);
	debugfs_create_file("ras_ctrl", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_ctrl_ops);
	debugfs_create_file("ras_eeprom_reset", S_IWUGO | S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_ops);
	debugfs_create_u32("bad_page_cnt_threshold", 0444, dir,
			   &con->bad_page_cnt_threshold);
	debugfs_create_u32("ras_num_recs", 0444, dir, &eeprom->ras_num_recs);
	debugfs_create_x32("ras_hw_enabled", 0444, dir, &adev->ras_hw_enabled);
	debugfs_create_x32("ras_enabled", 0444, dir, &adev->ras_enabled);
	debugfs_create_file("ras_eeprom_size", S_IRUGO, dir, adev,
			    &amdgpu_ras_debugfs_eeprom_size_ops);
	con->de_ras_eeprom_table = debugfs_create_file("ras_eeprom_table",
						       S_IRUGO, dir, adev,
						       &amdgpu_ras_debugfs_eeprom_table_ops);
	amdgpu_ras_debugfs_set_ret_size(&con->eeprom_control);
1848 | |
	/*
	 * After one uncorrectable error happens, GPU recovery will usually
	 * be scheduled. But GPU recovery is known to sometimes fail to
	 * bring the GPU back, so the interface below provides a direct way
	 * for the user to reboot the system automatically in such a case,
	 * when an ERREVENT_ATHUB_INTERRUPT is generated. The normal GPU
	 * recovery routine will never be called then.
	 */
	debugfs_create_bool("auto_reboot", S_IWUGO | S_IRUGO, dir, &con->reboot);
1858 | |
	/*
	 * The user could set this not to clean up the hardware's error count
	 * registers of RAS IPs during ras recovery.
	 */
	debugfs_create_bool("disable_ras_err_cnt_harvest", 0644, dir,
			    &con->disable_ras_err_cnt_harvest);
1865 | return dir; |
1866 | } |
1867 | |
1868 | static void amdgpu_ras_debugfs_create(struct amdgpu_device *adev, |
1869 | struct ras_fs_if *head, |
1870 | struct dentry *dir) |
1871 | { |
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &head->head);
1873 | |
1874 | if (!obj || !dir) |
1875 | return; |
1876 | |
1877 | get_obj(obj); |
1878 | |
1879 | memcpy(obj->fs_data.debugfs_name, |
1880 | head->debugfs_name, |
1881 | sizeof(obj->fs_data.debugfs_name)); |
1882 | |
	debugfs_create_file(obj->fs_data.debugfs_name, S_IWUGO | S_IRUGO, dir,
			    obj, &amdgpu_ras_debugfs_ops);
1885 | } |
1886 | |
1887 | void amdgpu_ras_debugfs_create_all(struct amdgpu_device *adev) |
1888 | { |
1889 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1890 | struct dentry *dir; |
1891 | struct ras_manager *obj; |
1892 | struct ras_fs_if fs_info; |
1893 | |
	/*
	 * this function won't be called in the resume path, so there is
	 * no need to check the suspend and gpu-reset status
	 */
1898 | if (!IS_ENABLED(CONFIG_DEBUG_FS) || !con) |
1899 | return; |
1900 | |
1901 | dir = amdgpu_ras_debugfs_create_ctrl_node(adev); |
1902 | |
1903 | list_for_each_entry(obj, &con->head, node) { |
		if (amdgpu_ras_is_supported(adev, obj->head.block) &&
		    (obj->attr_inuse == 1)) {
			sprintf(fs_info.debugfs_name, "%s_err_inject",
				get_ras_block_str(&obj->head));
			fs_info.head = obj->head;
			amdgpu_ras_debugfs_create(adev, &fs_info, dir);
1910 | } |
1911 | } |
1912 | |
	if (amdgpu_aca_is_enabled(adev))
		amdgpu_aca_smu_debugfs_init(adev, dir);
	else
		amdgpu_mca_smu_debugfs_init(adev, dir);
1917 | } |
1918 | |
1919 | /* debugfs end */ |
1920 | |
1921 | /* ras fs */ |
1922 | static BIN_ATTR(gpu_vram_bad_pages, S_IRUGO, |
1923 | amdgpu_ras_sysfs_badpages_read, NULL, 0); |
1924 | static DEVICE_ATTR(features, S_IRUGO, |
1925 | amdgpu_ras_sysfs_features_read, NULL); |
1926 | static DEVICE_ATTR(version, 0444, |
1927 | amdgpu_ras_sysfs_version_show, NULL); |
1928 | static DEVICE_ATTR(schema, 0444, |
1929 | amdgpu_ras_sysfs_schema_show, NULL); |
1930 | static int amdgpu_ras_fs_init(struct amdgpu_device *adev) |
1931 | { |
1932 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1933 | struct attribute_group group = { |
1934 | .name = RAS_FS_NAME, |
1935 | }; |
1936 | struct attribute *attrs[] = { |
1937 | &con->features_attr.attr, |
1938 | &con->version_attr.attr, |
1939 | &con->schema_attr.attr, |
1940 | NULL |
1941 | }; |
1942 | struct bin_attribute *bin_attrs[] = { |
1943 | NULL, |
1944 | NULL, |
1945 | }; |
1946 | int r; |
1947 | |
1948 | group.attrs = attrs; |
1949 | |
1950 | /* add features entry */ |
1951 | con->features_attr = dev_attr_features; |
1952 | sysfs_attr_init(attrs[0]); |
1953 | |
1954 | /* add version entry */ |
1955 | con->version_attr = dev_attr_version; |
1956 | sysfs_attr_init(attrs[1]); |
1957 | |
1958 | /* add schema entry */ |
1959 | con->schema_attr = dev_attr_schema; |
1960 | sysfs_attr_init(attrs[2]); |
1961 | |
1962 | if (amdgpu_bad_page_threshold != 0) { |
1963 | /* add bad_page_features entry */ |
1964 | bin_attr_gpu_vram_bad_pages.private = NULL; |
1965 | con->badpages_attr = bin_attr_gpu_vram_bad_pages; |
1966 | bin_attrs[0] = &con->badpages_attr; |
1967 | group.bin_attrs = bin_attrs; |
1968 | sysfs_bin_attr_init(bin_attrs[0]); |
1969 | } |
1970 | |
	r = sysfs_create_group(&adev->dev->kobj, &group);
	if (r)
		dev_err(adev->dev, "Failed to create RAS sysfs group!");
1974 | |
1975 | return 0; |
1976 | } |
1977 | |
1978 | static int amdgpu_ras_fs_fini(struct amdgpu_device *adev) |
1979 | { |
1980 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
1981 | struct ras_manager *con_obj, *ip_obj, *tmp; |
1982 | |
1983 | if (IS_ENABLED(CONFIG_DEBUG_FS)) { |
1984 | list_for_each_entry_safe(con_obj, tmp, &con->head, node) { |
			ip_obj = amdgpu_ras_find_obj(adev, &con_obj->head);
			if (ip_obj)
				put_obj(ip_obj);
1988 | } |
1989 | } |
1990 | |
1991 | amdgpu_ras_sysfs_remove_all(adev); |
1992 | return 0; |
1993 | } |
1994 | /* ras fs end */ |
1995 | |
1996 | /* ih begin */ |
1997 | |
/* For the hardware that cannot enable the bif ring for both the
 * ras_controller_irq and the ras_err_event_athub_irq ih cookies, the
 * driver has to poll a status register to check whether the interrupt
 * has been triggered, and properly ack the interrupt if it is there.
 */
2003 | void amdgpu_ras_interrupt_fatal_error_handler(struct amdgpu_device *adev) |
2004 | { |
2005 | /* Fatal error events are handled on host side */ |
2006 | if (amdgpu_sriov_vf(adev)) |
2007 | return; |
2008 | |
2009 | if (adev->nbio.ras && |
2010 | adev->nbio.ras->handle_ras_controller_intr_no_bifring) |
2011 | adev->nbio.ras->handle_ras_controller_intr_no_bifring(adev); |
2012 | |
2013 | if (adev->nbio.ras && |
2014 | adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring) |
2015 | adev->nbio.ras->handle_ras_err_event_athub_intr_no_bifring(adev); |
2016 | } |
2017 | |
2018 | static void amdgpu_ras_interrupt_poison_consumption_handler(struct ras_manager *obj, |
2019 | struct amdgpu_iv_entry *entry) |
2020 | { |
2021 | bool poison_stat = false; |
2022 | struct amdgpu_device *adev = obj->adev; |
	struct amdgpu_ras_block_object *block_obj =
		amdgpu_ras_get_ras_block(adev, obj->head.block, 0);
2025 | |
2026 | if (!block_obj) |
2027 | return; |
2028 | |
	/* both query_poison_status and handle_poison_consumption are optional,
	 * but at least one of them should be implemented if we need a poison
	 * consumption handler
	 */
2033 | if (block_obj->hw_ops && block_obj->hw_ops->query_poison_status) { |
2034 | poison_stat = block_obj->hw_ops->query_poison_status(adev); |
2035 | if (!poison_stat) { |
2036 | /* Not poison consumption interrupt, no need to handle it */ |
			dev_info(adev->dev, "No RAS poison status in %s poison IH.\n",
				 block_obj->ras_comm.name);
2039 | |
2040 | return; |
2041 | } |
2042 | } |
2043 | |
	amdgpu_umc_poison_handler(adev, obj->head.block, false);
2045 | |
2046 | if (block_obj->hw_ops && block_obj->hw_ops->handle_poison_consumption) |
2047 | poison_stat = block_obj->hw_ops->handle_poison_consumption(adev); |
2048 | |
2049 | /* gpu reset is fallback for failed and default cases */ |
2050 | if (poison_stat) { |
		dev_info(adev->dev, "GPU reset for %s RAS poison consumption is issued!\n",
			 block_obj->ras_comm.name);
2053 | amdgpu_ras_reset_gpu(adev); |
2054 | } else { |
2055 | amdgpu_gfx_poison_consumption_handler(adev, entry); |
2056 | } |
2057 | } |
2058 | |
2059 | static void amdgpu_ras_interrupt_poison_creation_handler(struct ras_manager *obj, |
2060 | struct amdgpu_iv_entry *entry) |
2061 | { |
	dev_info(obj->adev->dev,
		 "Poison is created\n");
2064 | } |
2065 | |
2066 | static void amdgpu_ras_interrupt_umc_handler(struct ras_manager *obj, |
2067 | struct amdgpu_iv_entry *entry) |
2068 | { |
2069 | struct ras_ih_data *data = &obj->ih_data; |
2070 | struct ras_err_data err_data; |
2071 | int ret; |
2072 | |
2073 | if (!data->cb) |
2074 | return; |
2075 | |
	ret = amdgpu_ras_error_data_init(&err_data);
2077 | if (ret) |
2078 | return; |
2079 | |
	/* Let the IP handle its data, maybe we need to get the output
	 * from the callback to update the error type/count, etc.
	 */
	ret = data->cb(obj->adev, &err_data, entry);
	/* A ue will trigger an interrupt, and in that case
	 * we need to do a reset to recover the whole system.
	 * But leave the IP to do that recovery; here we just dispatch
	 * the error.
	 */
2089 | if (ret == AMDGPU_RAS_SUCCESS) { |
		/* these counts could be left as 0 if
		 * some blocks do not count the number of errors
		 */
2093 | obj->err_data.ue_count += err_data.ue_count; |
2094 | obj->err_data.ce_count += err_data.ce_count; |
2095 | obj->err_data.de_count += err_data.de_count; |
2096 | } |
2097 | |
	amdgpu_ras_error_data_fini(&err_data);
2099 | } |
2100 | |
2101 | static void amdgpu_ras_interrupt_handler(struct ras_manager *obj) |
2102 | { |
2103 | struct ras_ih_data *data = &obj->ih_data; |
2104 | struct amdgpu_iv_entry entry; |
2105 | |
2106 | while (data->rptr != data->wptr) { |
2107 | rmb(); |
2108 | memcpy(&entry, &data->ring[data->rptr], |
2109 | data->element_size); |
2110 | |
2111 | wmb(); |
2112 | data->rptr = (data->aligned_element_size + |
2113 | data->rptr) % data->ring_size; |
2114 | |
		if (amdgpu_ras_is_poison_mode_supported(obj->adev)) {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_poison_creation_handler(obj, &entry);
			else
				amdgpu_ras_interrupt_poison_consumption_handler(obj, &entry);
		} else {
			if (obj->head.block == AMDGPU_RAS_BLOCK__UMC)
				amdgpu_ras_interrupt_umc_handler(obj, &entry);
			else
				dev_warn(obj->adev->dev,
					 "No RAS interrupt handler for non-UMC block with poison disabled.\n");
2126 | } |
2127 | } |
2128 | } |
2129 | |
2130 | static void amdgpu_ras_interrupt_process_handler(struct work_struct *work) |
2131 | { |
2132 | struct ras_ih_data *data = |
2133 | container_of(work, struct ras_ih_data, ih_work); |
2134 | struct ras_manager *obj = |
2135 | container_of(data, struct ras_manager, ih_data); |
2136 | |
2137 | amdgpu_ras_interrupt_handler(obj); |
2138 | } |
2139 | |
2140 | int amdgpu_ras_interrupt_dispatch(struct amdgpu_device *adev, |
2141 | struct ras_dispatch_if *info) |
2142 | { |
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, &info->head);
	struct ras_ih_data *data;

	if (!obj)
		return -EINVAL;

	data = &obj->ih_data;
	if (data->inuse == 0)
2150 | return 0; |
2151 | |
	/* Might overflow: there is no check that the writer does not overtake the reader. */
2153 | memcpy(&data->ring[data->wptr], info->entry, |
2154 | data->element_size); |
2155 | |
2156 | wmb(); |
2157 | data->wptr = (data->aligned_element_size + |
2158 | data->wptr) % data->ring_size; |
2159 | |
	schedule_work(&data->ih_work);
2161 | |
2162 | return 0; |
2163 | } |
2164 | |
2165 | int amdgpu_ras_interrupt_remove_handler(struct amdgpu_device *adev, |
2166 | struct ras_common_if *head) |
2167 | { |
2168 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); |
2169 | struct ras_ih_data *data; |
2170 | |
2171 | if (!obj) |
2172 | return -EINVAL; |
2173 | |
2174 | data = &obj->ih_data; |
2175 | if (data->inuse == 0) |
2176 | return 0; |
2177 | |
	cancel_work_sync(&data->ih_work);

	kfree(data->ring);
2181 | memset(data, 0, sizeof(*data)); |
2182 | put_obj(obj); |
2183 | |
2184 | return 0; |
2185 | } |
2186 | |
2187 | int amdgpu_ras_interrupt_add_handler(struct amdgpu_device *adev, |
2188 | struct ras_common_if *head) |
2189 | { |
2190 | struct ras_manager *obj = amdgpu_ras_find_obj(adev, head); |
2191 | struct ras_ih_data *data; |
2192 | struct amdgpu_ras_block_object *ras_obj; |
2193 | |
2194 | if (!obj) { |
		/* in case we register the IH before enabling the ras feature */
2196 | obj = amdgpu_ras_create_obj(adev, head); |
2197 | if (!obj) |
2198 | return -EINVAL; |
2199 | } else |
2200 | get_obj(obj); |
2201 | |
2202 | ras_obj = container_of(head, struct amdgpu_ras_block_object, ras_comm); |
2203 | |
2204 | data = &obj->ih_data; |
	/* add the callback, etc. */
2206 | *data = (struct ras_ih_data) { |
2207 | .inuse = 0, |
2208 | .cb = ras_obj->ras_cb, |
2209 | .element_size = sizeof(struct amdgpu_iv_entry), |
2210 | .rptr = 0, |
2211 | .wptr = 0, |
2212 | }; |
2213 | |
2214 | INIT_WORK(&data->ih_work, amdgpu_ras_interrupt_process_handler); |
2215 | |
2216 | data->aligned_element_size = ALIGN(data->element_size, 8); |
2217 | /* the ring can store 64 iv entries. */ |
2218 | data->ring_size = 64 * data->aligned_element_size; |
	data->ring = kmalloc(data->ring_size, GFP_KERNEL);
2220 | if (!data->ring) { |
2221 | put_obj(obj); |
2222 | return -ENOMEM; |
2223 | } |
2224 | |
2225 | /* IH is ready */ |
2226 | data->inuse = 1; |
2227 | |
2228 | return 0; |
2229 | } |
2230 | |
2231 | static int amdgpu_ras_interrupt_remove_all(struct amdgpu_device *adev) |
2232 | { |
2233 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2234 | struct ras_manager *obj, *tmp; |
2235 | |
2236 | list_for_each_entry_safe(obj, tmp, &con->head, node) { |
		amdgpu_ras_interrupt_remove_handler(adev, &obj->head);
2238 | } |
2239 | |
2240 | return 0; |
2241 | } |
2242 | /* ih end */ |
2243 | |
/* traverse all IPs except NBIO to query error counters */
2245 | static void amdgpu_ras_log_on_err_counter(struct amdgpu_device *adev) |
2246 | { |
2247 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2248 | struct ras_manager *obj; |
2249 | |
2250 | if (!adev->ras_enabled || !con) |
2251 | return; |
2252 | |
2253 | list_for_each_entry(obj, &con->head, node) { |
2254 | struct ras_query_if info = { |
2255 | .head = obj->head, |
2256 | }; |
2257 | |
		/*
		 * The PCIE_BIF IP has a separate isr for the ras controller
		 * interrupt, and the specific ras counter query will be done
		 * in that isr. So skip such blocks from the common sync-flood
		 * interrupt isr.
		 */
		if (info.head.block == AMDGPU_RAS_BLOCK__PCIE_BIF)
			continue;

		/*
		 * This is a workaround for aldebaran: skip sending the msg to
		 * the smu to get the ecc_info table, because the smu
		 * temporarily fails to handle that request. It should be
		 * removed once the smu fixes its handling of the ecc_info
		 * table.
		 */
		if ((info.head.block == AMDGPU_RAS_BLOCK__UMC) &&
		    (amdgpu_ip_version(adev, MP1_HWIP, 0) ==
		     IP_VERSION(13, 0, 2)))
			continue;

		amdgpu_ras_query_error_status(adev, &info);

		if (amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(11, 0, 2) &&
		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(11, 0, 4) &&
		    amdgpu_ip_version(adev, MP0_HWIP, 0) !=
		    IP_VERSION(13, 0, 0)) {
			if (amdgpu_ras_reset_error_status(adev, info.head.block))
				dev_warn(adev->dev, "Failed to reset error counter and error status");
		}
2289 | } |
2290 | } |
2291 | |
2292 | /* Parse RdRspStatus and WrRspStatus */ |
2293 | static void amdgpu_ras_error_status_query(struct amdgpu_device *adev, |
2294 | struct ras_query_if *info) |
2295 | { |
2296 | struct amdgpu_ras_block_object *block_obj; |
	/*
	 * Only two blocks need to query the read/write
	 * RspStatus at the current state
	 */
2301 | if ((info->head.block != AMDGPU_RAS_BLOCK__GFX) && |
2302 | (info->head.block != AMDGPU_RAS_BLOCK__MMHUB)) |
2303 | return; |
2304 | |
	block_obj = amdgpu_ras_get_ras_block(adev,
					     info->head.block,
					     info->head.sub_block_index);
2308 | |
2309 | if (!block_obj || !block_obj->hw_ops) { |
		dev_dbg_once(adev->dev, "%s doesn't config RAS function\n",
			     get_ras_block_str(&info->head));
2312 | return; |
2313 | } |
2314 | |
2315 | if (block_obj->hw_ops->query_ras_error_status) |
2316 | block_obj->hw_ops->query_ras_error_status(adev); |
2317 | |
2318 | } |
2319 | |
2320 | static void amdgpu_ras_query_err_status(struct amdgpu_device *adev) |
2321 | { |
2322 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2323 | struct ras_manager *obj; |
2324 | |
2325 | if (!adev->ras_enabled || !con) |
2326 | return; |
2327 | |
2328 | list_for_each_entry(obj, &con->head, node) { |
2329 | struct ras_query_if info = { |
2330 | .head = obj->head, |
2331 | }; |
2332 | |
		amdgpu_ras_error_status_query(adev, &info);
2334 | } |
2335 | } |
2336 | |
2337 | /* recovery begin */ |
2338 | |
/* return 0 on success.
 * caller needs to free bps.
 */
2342 | static int amdgpu_ras_badpages_read(struct amdgpu_device *adev, |
2343 | struct ras_badpage **bps, unsigned int *count) |
2344 | { |
2345 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2346 | struct ras_err_handler_data *data; |
2347 | int i = 0; |
2348 | int ret = 0, status; |
2349 | |
2350 | if (!con || !con->eh_data || !bps || !count) |
2351 | return -EINVAL; |
2352 | |
2353 | mutex_lock(&con->recovery_lock); |
2354 | data = con->eh_data; |
2355 | if (!data || data->count == 0) { |
2356 | *bps = NULL; |
2357 | ret = -EINVAL; |
2358 | goto out; |
2359 | } |
2360 | |
	*bps = kmalloc(sizeof(struct ras_badpage) * data->count, GFP_KERNEL);
2362 | if (!*bps) { |
2363 | ret = -ENOMEM; |
2364 | goto out; |
2365 | } |
2366 | |
2367 | for (; i < data->count; i++) { |
2368 | (*bps)[i] = (struct ras_badpage){ |
2369 | .bp = data->bps[i].retired_page, |
2370 | .size = AMDGPU_GPU_PAGE_SIZE, |
2371 | .flags = AMDGPU_RAS_RETIRE_PAGE_RESERVED, |
2372 | }; |
		status = amdgpu_vram_mgr_query_page_status(&adev->mman.vram_mgr,
							   data->bps[i].retired_page);
2375 | if (status == -EBUSY) |
2376 | (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_PENDING; |
2377 | else if (status == -ENOENT) |
2378 | (*bps)[i].flags = AMDGPU_RAS_RETIRE_PAGE_FAULT; |
2379 | } |
2380 | |
2381 | *count = data->count; |
2382 | out: |
	mutex_unlock(&con->recovery_lock);
2384 | return ret; |
2385 | } |
2386 | |
2387 | static void amdgpu_ras_do_recovery(struct work_struct *work) |
2388 | { |
2389 | struct amdgpu_ras *ras = |
2390 | container_of(work, struct amdgpu_ras, recovery_work); |
2391 | struct amdgpu_device *remote_adev = NULL; |
2392 | struct amdgpu_device *adev = ras->adev; |
2393 | struct list_head device_list, *device_list_handle = NULL; |
2394 | struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev); |
2395 | |
	if (hive)
		atomic_set(&hive->ras_recovery, 1);
	if (!ras->disable_ras_err_cnt_harvest) {

		/* Build list of devices to query RAS related errors */
		if (hive && adev->gmc.xgmi.num_physical_nodes > 1) {
			device_list_handle = &hive->device_list;
		} else {
			INIT_LIST_HEAD(&device_list);
			list_add_tail(&adev->gmc.xgmi.head, &device_list);
			device_list_handle = &device_list;
		}

		list_for_each_entry(remote_adev,
				    device_list_handle, gmc.xgmi.head) {
			amdgpu_ras_query_err_status(remote_adev);
			amdgpu_ras_log_on_err_counter(remote_adev);
		}
2413 | } |
2414 | |
2415 | } |
2416 | |
	if (amdgpu_device_should_recover_gpu(ras->adev)) {
		struct amdgpu_reset_context reset_context;
		memset(&reset_context, 0, sizeof(reset_context));

		reset_context.method = AMD_RESET_METHOD_NONE;
		reset_context.reset_req_dev = adev;

		/* Perform full reset in fatal error mode */
		if (!amdgpu_ras_is_poison_mode_supported(ras->adev))
			set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
		else {
			clear_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2429 | |
2430 | if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE2_RESET) { |
2431 | ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE2_RESET; |
2432 | reset_context.method = AMD_RESET_METHOD_MODE2; |
2433 | } |
2434 | |
2435 | /* Fatal error occurs in poison mode, mode1 reset is used to |
2436 | * recover gpu. |
2437 | */ |
2438 | if (ras->gpu_reset_flags & AMDGPU_RAS_GPU_RESET_MODE1_RESET) { |
2439 | ras->gpu_reset_flags &= ~AMDGPU_RAS_GPU_RESET_MODE1_RESET; |
				set_bit(AMDGPU_NEED_FULL_RESET, &reset_context.flags);
2441 | |
2442 | /* For any RAS error that needs a full reset to |
2443 | * recover, set the fatal error status |
2444 | */ |
2445 | if (hive) { |
2446 | list_for_each_entry(remote_adev, |
2447 | &hive->device_list, |
2448 | gmc.xgmi.head) |
						amdgpu_ras_set_fed(remote_adev,
								   true);
				} else {
					amdgpu_ras_set_fed(adev, true);
2453 | } |
2454 | psp_fatal_error_recovery_quirk(&adev->psp); |
2455 | } |
2456 | } |
2457 | |
		amdgpu_device_gpu_recover(ras->adev, NULL, &reset_context);
	}
	atomic_set(&ras->in_recovery, 0);
	if (hive) {
		atomic_set(&hive->ras_recovery, 0);
		amdgpu_put_xgmi_hive(hive);
2464 | } |
2465 | } |
2466 | |
2467 | /* alloc/realloc bps array */ |
2468 | static int amdgpu_ras_realloc_eh_data_space(struct amdgpu_device *adev, |
2469 | struct ras_err_handler_data *data, int pages) |
2470 | { |
2471 | unsigned int old_space = data->count + data->space_left; |
2472 | unsigned int new_space = old_space + pages; |
2473 | unsigned int align_space = ALIGN(new_space, 512); |
	void *bps = kmalloc(align_space * sizeof(*data->bps), GFP_KERNEL);

	if (!bps)
		return -ENOMEM;
2479 | |
2480 | if (data->bps) { |
2481 | memcpy(bps, data->bps, |
2482 | data->count * sizeof(*data->bps)); |
		kfree(data->bps);
2484 | } |
2485 | |
2486 | data->bps = bps; |
2487 | data->space_left += align_space - old_space; |
2488 | return 0; |
2489 | } |
2490 | |
/* it deals with vram only. */
2492 | int amdgpu_ras_add_bad_pages(struct amdgpu_device *adev, |
2493 | struct eeprom_table_record *bps, int pages) |
2494 | { |
2495 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2496 | struct ras_err_handler_data *data; |
2497 | int ret = 0; |
2498 | uint32_t i; |
2499 | |
2500 | if (!con || !con->eh_data || !bps || pages <= 0) |
2501 | return 0; |
2502 | |
2503 | mutex_lock(&con->recovery_lock); |
2504 | data = con->eh_data; |
2505 | if (!data) |
2506 | goto out; |
2507 | |
2508 | for (i = 0; i < pages; i++) { |
		if (amdgpu_ras_check_bad_page_unlock(con,
				bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT))
2511 | continue; |
2512 | |
		if (!data->space_left &&
		    amdgpu_ras_realloc_eh_data_space(adev, data, 256)) {
			ret = -ENOMEM;
			goto out;
		}

		amdgpu_vram_mgr_reserve_range(&adev->mman.vram_mgr,
					      bps[i].retired_page << AMDGPU_GPU_PAGE_SHIFT,
					      AMDGPU_GPU_PAGE_SIZE);
2522 | |
2523 | memcpy(&data->bps[data->count], &bps[i], sizeof(*data->bps)); |
2524 | data->count++; |
2525 | data->space_left--; |
2526 | } |
2527 | out: |
	mutex_unlock(&con->recovery_lock);
2529 | |
2530 | return ret; |
2531 | } |
2532 | |
2533 | /* |
2534 | * write error record array to eeprom, the function should be |
2535 | * protected by recovery_lock |
 * new_cnt: newly added UE count, excluding reserved bad pages; can be NULL
2537 | */ |
2538 | int amdgpu_ras_save_bad_pages(struct amdgpu_device *adev, |
2539 | unsigned long *new_cnt) |
2540 | { |
2541 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2542 | struct ras_err_handler_data *data; |
2543 | struct amdgpu_ras_eeprom_control *control; |
2544 | int save_count; |
2545 | |
2546 | if (!con || !con->eh_data) { |
2547 | if (new_cnt) |
2548 | *new_cnt = 0; |
2549 | |
2550 | return 0; |
2551 | } |
2552 | |
2553 | mutex_lock(&con->recovery_lock); |
2554 | control = &con->eeprom_control; |
2555 | data = con->eh_data; |
2556 | save_count = data->count - control->ras_num_recs; |
	mutex_unlock(&con->recovery_lock);
2558 | |
2559 | if (new_cnt) |
2560 | *new_cnt = save_count / adev->umc.retire_unit; |
2561 | |
2562 | /* only new entries are saved */ |
	if (save_count > 0) {
		if (amdgpu_ras_eeprom_append(control,
					     &data->bps[control->ras_num_recs],
					     save_count)) {
			dev_err(adev->dev, "Failed to save EEPROM table data!");
			return -EIO;
		}

		dev_info(adev->dev, "Saved %d pages to EEPROM table.\n", save_count);
	}
2573 | |
2574 | return 0; |
2575 | } |
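
/*
 * Typical retirement flow (sketch; the real call sites live in the UMC
 * error handling code, not in this file): newly detected records are first
 * appended and reserved, then only the new entries are persisted.
 *
 *	amdgpu_ras_add_bad_pages(adev, bps, pages);
 *	amdgpu_ras_save_bad_pages(adev, &new_cnt);
 */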
2576 | |
2577 | /* |
2578 | * read error record array in eeprom and reserve enough space for |
2579 | * storing new bad pages |
2580 | */ |
2581 | static int amdgpu_ras_load_bad_pages(struct amdgpu_device *adev) |
2582 | { |
2583 | struct amdgpu_ras_eeprom_control *control = |
2584 | &adev->psp.ras_context.ras->eeprom_control; |
2585 | struct eeprom_table_record *bps; |
2586 | int ret; |
2587 | |
2588 | /* no bad page record, skip eeprom access */ |
2589 | if (control->ras_num_recs == 0 || amdgpu_bad_page_threshold == 0) |
2590 | return 0; |
2591 | |
	bps = kcalloc(control->ras_num_recs, sizeof(*bps), GFP_KERNEL);
	if (!bps)
		return -ENOMEM;

	ret = amdgpu_ras_eeprom_read(control, bps, control->ras_num_recs);
	if (ret)
		dev_err(adev->dev, "Failed to load EEPROM table records!");
	else
		ret = amdgpu_ras_add_bad_pages(adev, bps, control->ras_num_recs);

	kfree(bps);
2603 | return ret; |
2604 | } |
2605 | |
2606 | static bool amdgpu_ras_check_bad_page_unlock(struct amdgpu_ras *con, |
2607 | uint64_t addr) |
2608 | { |
2609 | struct ras_err_handler_data *data = con->eh_data; |
2610 | int i; |
2611 | |
2612 | addr >>= AMDGPU_GPU_PAGE_SHIFT; |
2613 | for (i = 0; i < data->count; i++) |
2614 | if (addr == data->bps[i].retired_page) |
2615 | return true; |
2616 | |
2617 | return false; |
2618 | } |
2619 | |
2620 | /* |
2621 | * check if an address belongs to bad page |
2622 | * |
2623 | * Note: this check is only for umc block |
2624 | */ |
2625 | static bool amdgpu_ras_check_bad_page(struct amdgpu_device *adev, |
2626 | uint64_t addr) |
2627 | { |
2628 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2629 | bool ret = false; |
2630 | |
2631 | if (!con || !con->eh_data) |
2632 | return ret; |
2633 | |
2634 | mutex_lock(&con->recovery_lock); |
	ret = amdgpu_ras_check_bad_page_unlock(con, addr);
	mutex_unlock(&con->recovery_lock);
2637 | return ret; |
2638 | } |
2639 | |
2640 | static void amdgpu_ras_validate_threshold(struct amdgpu_device *adev, |
2641 | uint32_t max_count) |
2642 | { |
2643 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2644 | |
2645 | /* |
2646 | * Justification of value bad_page_cnt_threshold in ras structure |
2647 | * |
2648 | * Generally, 0 <= amdgpu_bad_page_threshold <= max record length |
2649 | * in eeprom or amdgpu_bad_page_threshold == -2, introduce two |
2650 | * scenarios accordingly. |
2651 | * |
2652 | * Bad page retirement enablement: |
2653 | * - If amdgpu_bad_page_threshold = -2, |
2654 | * bad_page_cnt_threshold = typical value by formula. |
2655 | * |
2656 | * - When the value from user is 0 < amdgpu_bad_page_threshold < |
2657 | * max record length in eeprom, use it directly. |
2658 | * |
2659 | * Bad page retirement disablement: |
2660 | * - If amdgpu_bad_page_threshold = 0, bad page retirement |
2661 | * functionality is disabled, and bad_page_cnt_threshold will |
2662 | * take no effect. |
2663 | */ |
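
	/*
	 * Worked example (assuming RAS_BAD_PAGE_COVER is 100 MiB): for a
	 * 32 GiB VRAM part the default branch below computes
	 * 32 GiB / 100 MiB = 327, which is then clamped to max_count.
	 */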
2664 | |
2665 | if (amdgpu_bad_page_threshold < 0) { |
2666 | u64 val = adev->gmc.mc_vram_size; |
2667 | |
2668 | do_div(val, RAS_BAD_PAGE_COVER); |
2669 | con->bad_page_cnt_threshold = min(lower_32_bits(val), |
2670 | max_count); |
2671 | } else { |
2672 | con->bad_page_cnt_threshold = min_t(int, max_count, |
2673 | amdgpu_bad_page_threshold); |
2674 | } |
2675 | } |
2676 | |
2677 | static int amdgpu_ras_page_retirement_thread(void *param) |
2678 | { |
2679 | struct amdgpu_device *adev = (struct amdgpu_device *)param; |
2680 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2681 | |
2682 | while (!kthread_should_stop()) { |
2683 | |
2684 | wait_event_interruptible(con->page_retirement_wq, |
2685 | kthread_should_stop() || |
2686 | atomic_read(&con->page_retirement_req_cnt)); |
2687 | |
2688 | if (kthread_should_stop()) |
2689 | break; |
2690 | |
		dev_info(adev->dev, "Start processing page retirement. request:%d\n",
			 atomic_read(&con->page_retirement_req_cnt));

		atomic_dec(&con->page_retirement_req_cnt);

		amdgpu_umc_bad_page_polling_timeout(adev,
				false, MAX_UMC_POISON_POLLING_TIME_ASYNC);
2698 | } |
2699 | |
2700 | return 0; |
2701 | } |
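
/*
 * Producer-side sketch (illustrative; the real producer is in the UMC
 * poison handling path): a retirement request is queued by bumping the
 * counter and waking the thread above.
 *
 *	atomic_inc(&con->page_retirement_req_cnt);
 *	wake_up(&con->page_retirement_wq);
 */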
2702 | |
2703 | int amdgpu_ras_recovery_init(struct amdgpu_device *adev) |
2704 | { |
2705 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2706 | struct ras_err_handler_data **data; |
2707 | u32 max_eeprom_records_count = 0; |
2708 | bool exc_err_limit = false; |
2709 | int ret; |
2710 | |
2711 | if (!con || amdgpu_sriov_vf(adev)) |
2712 | return 0; |
2713 | |
2714 | /* Allow access to RAS EEPROM via debugfs, when the ASIC |
2715 | * supports RAS and debugfs is enabled, but when |
2716 | * adev->ras_enabled is unset, i.e. when "ras_enable" |
2717 | * module parameter is set to 0. |
2718 | */ |
2719 | con->adev = adev; |
2720 | |
2721 | if (!adev->ras_enabled) |
2722 | return 0; |
2723 | |
2724 | data = &con->eh_data; |
	*data = kzalloc(sizeof(**data), GFP_KERNEL);
2726 | if (!*data) { |
2727 | ret = -ENOMEM; |
2728 | goto out; |
2729 | } |
2730 | |
2731 | mutex_init(&con->recovery_lock); |
2732 | INIT_WORK(&con->recovery_work, amdgpu_ras_do_recovery); |
	atomic_set(&con->in_recovery, 0);
2734 | con->eeprom_control.bad_channel_bitmap = 0; |
2735 | |
	max_eeprom_records_count = amdgpu_ras_eeprom_max_record_count(&con->eeprom_control);
	amdgpu_ras_validate_threshold(adev, max_eeprom_records_count);
2738 | |
	/* Todo: During testing, the SMU might fail to read the eeprom through
	 * I2C when the GPU is pending an XGMI reset during probe time
	 * (mostly after a second bus reset); skip it for now
	 */
2743 | if (adev->gmc.xgmi.pending_reset) |
2744 | return 0; |
	ret = amdgpu_ras_eeprom_init(&con->eeprom_control, &exc_err_limit);
	/*
	 * This call fails when exc_err_limit is true or
	 * ret != 0.
	 */
2750 | if (exc_err_limit || ret) |
2751 | goto free; |
2752 | |
2753 | if (con->eeprom_control.ras_num_recs) { |
2754 | ret = amdgpu_ras_load_bad_pages(adev); |
2755 | if (ret) |
2756 | goto free; |
2757 | |
		amdgpu_dpm_send_hbm_bad_pages_num(adev, con->eeprom_control.ras_num_recs);
2759 | |
		if (con->update_channel_flag == true) {
			amdgpu_dpm_send_hbm_bad_channel_flag(adev, con->eeprom_control.bad_channel_bitmap);
2762 | con->update_channel_flag = false; |
2763 | } |
2764 | } |
2765 | |
2766 | mutex_init(&con->page_retirement_lock); |
2767 | init_waitqueue_head(&con->page_retirement_wq); |
	atomic_set(&con->page_retirement_req_cnt, 0);
	con->page_retirement_thread =
		kthread_run(amdgpu_ras_page_retirement_thread, adev, "umc_page_retirement");
	if (IS_ERR(con->page_retirement_thread)) {
		con->page_retirement_thread = NULL;
		dev_warn(adev->dev, "Failed to create umc_page_retirement thread!\n");
2774 | } |
2775 | |
2776 | #ifdef CONFIG_X86_MCE_AMD |
2777 | if ((adev->asic_type == CHIP_ALDEBARAN) && |
2778 | (adev->gmc.xgmi.connected_to_cpu)) |
2779 | amdgpu_register_bad_pages_mca_notifier(adev); |
2780 | #endif |
2781 | return 0; |
2782 | |
2783 | free: |
	kfree((*data)->bps);
	kfree(*data);
	con->eh_data = NULL;
out:
	dev_warn(adev->dev, "Failed to initialize ras recovery! (%d)\n", ret);
2789 | |
	/*
	 * Except for the error-threshold-exceeded case, other failure cases
	 * in this function would not fail the amdgpu driver init.
	 */
2794 | if (!exc_err_limit) |
2795 | ret = 0; |
2796 | else |
2797 | ret = -EINVAL; |
2798 | |
2799 | return ret; |
2800 | } |
2801 | |
2802 | static int amdgpu_ras_recovery_fini(struct amdgpu_device *adev) |
2803 | { |
2804 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2805 | struct ras_err_handler_data *data = con->eh_data; |
2806 | |
2807 | /* recovery_init failed to init it, fini is useless */ |
2808 | if (!data) |
2809 | return 0; |
2810 | |
	if (con->page_retirement_thread)
		kthread_stop(con->page_retirement_thread);

	atomic_set(&con->page_retirement_req_cnt, 0);

	cancel_work_sync(&con->recovery_work);

	mutex_lock(&con->recovery_lock);
	con->eh_data = NULL;
	kfree(data->bps);
	kfree(data);
	mutex_unlock(&con->recovery_lock);
2823 | |
2824 | return 0; |
2825 | } |
2826 | /* recovery end */ |
2827 | |
2828 | static bool amdgpu_ras_asic_supported(struct amdgpu_device *adev) |
2829 | { |
2830 | if (amdgpu_sriov_vf(adev)) { |
		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2832 | case IP_VERSION(13, 0, 2): |
2833 | case IP_VERSION(13, 0, 6): |
2834 | return true; |
2835 | default: |
2836 | return false; |
2837 | } |
2838 | } |
2839 | |
2840 | if (adev->asic_type == CHIP_IP_DISCOVERY) { |
		switch (amdgpu_ip_version(adev, MP0_HWIP, 0)) {
2842 | case IP_VERSION(13, 0, 0): |
2843 | case IP_VERSION(13, 0, 6): |
2844 | case IP_VERSION(13, 0, 10): |
2845 | return true; |
2846 | default: |
2847 | return false; |
2848 | } |
2849 | } |
2850 | |
2851 | return adev->asic_type == CHIP_VEGA10 || |
2852 | adev->asic_type == CHIP_VEGA20 || |
2853 | adev->asic_type == CHIP_ARCTURUS || |
2854 | adev->asic_type == CHIP_ALDEBARAN || |
2855 | adev->asic_type == CHIP_SIENNA_CICHLID; |
2856 | } |
2857 | |
/*
 * This is a workaround for the vega20 workstation sku:
 * force enable gfx ras and ignore the vbios gfx ras flag,
 * because GC EDC can not be written.
 */
2863 | static void amdgpu_ras_get_quirks(struct amdgpu_device *adev) |
2864 | { |
2865 | struct atom_context *ctx = adev->mode_info.atom_context; |
2866 | |
2867 | if (!ctx) |
2868 | return; |
2869 | |
	if (strnstr(ctx->vbios_pn, "D16406",
		    sizeof(ctx->vbios_pn)) ||
	    strnstr(ctx->vbios_pn, "D36002",
		    sizeof(ctx->vbios_pn)))
2874 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX); |
2875 | } |
2876 | |
/* Query ras capability via the atomfirmware interface */
2878 | static void amdgpu_ras_query_ras_capablity_from_vbios(struct amdgpu_device *adev) |
2879 | { |
2880 | /* mem_ecc cap */ |
	if (amdgpu_atomfirmware_mem_ecc_supported(adev)) {
		dev_info(adev->dev, "MEM ECC is active.\n");
		adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__UMC |
					 1 << AMDGPU_RAS_BLOCK__DF);
	} else {
		dev_info(adev->dev, "MEM ECC is not present.\n");
	}
2888 | |
2889 | /* sram_ecc cap */ |
	if (amdgpu_atomfirmware_sram_ecc_supported(adev)) {
		dev_info(adev->dev, "SRAM ECC is active.\n");
2892 | if (!amdgpu_sriov_vf(adev)) |
2893 | adev->ras_hw_enabled |= ~(1 << AMDGPU_RAS_BLOCK__UMC | |
2894 | 1 << AMDGPU_RAS_BLOCK__DF); |
2895 | else |
2896 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__PCIE_BIF | |
2897 | 1 << AMDGPU_RAS_BLOCK__SDMA | |
2898 | 1 << AMDGPU_RAS_BLOCK__GFX); |
2899 | |
2900 | /* |
2901 | * VCN/JPEG RAS can be supported on both bare metal and |
2902 | * SRIOV environment |
2903 | */ |
		if (amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(2, 6, 0) ||
		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 0) ||
		    amdgpu_ip_version(adev, VCN_HWIP, 0) == IP_VERSION(4, 0, 3))
2907 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__VCN | |
2908 | 1 << AMDGPU_RAS_BLOCK__JPEG); |
2909 | else |
2910 | adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__VCN | |
2911 | 1 << AMDGPU_RAS_BLOCK__JPEG); |
2912 | |
2913 | /* |
2914 | * XGMI RAS is not supported if xgmi num physical nodes |
2915 | * is zero |
2916 | */ |
2917 | if (!adev->gmc.xgmi.num_physical_nodes) |
2918 | adev->ras_hw_enabled &= ~(1 << AMDGPU_RAS_BLOCK__XGMI_WAFL); |
2919 | } else { |
		dev_info(adev->dev, "SRAM ECC is not present.\n");
2921 | } |
2922 | } |
2923 | |
2924 | /* Query poison mode from umc/df IP callbacks */ |
2925 | static void amdgpu_ras_query_poison_mode(struct amdgpu_device *adev) |
2926 | { |
2927 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
2928 | bool df_poison, umc_poison; |
2929 | |
2930 | /* poison setting is useless on SRIOV guest */ |
2931 | if (amdgpu_sriov_vf(adev) || !con) |
2932 | return; |
2933 | |
2934 | /* Init poison supported flag, the default value is false */ |
2935 | if (adev->gmc.xgmi.connected_to_cpu || |
2936 | adev->gmc.is_app_apu) { |
2937 | /* enabled by default when GPU is connected to CPU */ |
2938 | con->poison_supported = true; |
2939 | } else if (adev->df.funcs && |
2940 | adev->df.funcs->query_ras_poison_mode && |
2941 | adev->umc.ras && |
2942 | adev->umc.ras->query_ras_poison_mode) { |
2943 | df_poison = |
2944 | adev->df.funcs->query_ras_poison_mode(adev); |
2945 | umc_poison = |
2946 | adev->umc.ras->query_ras_poison_mode(adev); |
2947 | |
		/* Only if poison is set in both DF and UMC can we support it */
		if (df_poison && umc_poison)
			con->poison_supported = true;
		else if (df_poison != umc_poison)
			dev_warn(adev->dev,
				 "Poison setting is inconsistent in DF/UMC(%d:%d)!\n",
				 df_poison, umc_poison);
2955 | } |
2956 | } |
2957 | |
/*
 * Check the hardware's ras ability, which is saved in hw_supported.
 * If the hardware does not support ras, we can skip some ras
 * initialization and forbid some ras operations from the IPs.
 * If software itself, say a boot parameter, limits the ras ability,
 * we still need to allow the IPs to do some limited operations,
 * like disable. In such a case, we have to initialize ras as normal,
 * but need to check whether an operation is allowed in each function.
 */
2967 | static void amdgpu_ras_check_supported(struct amdgpu_device *adev) |
2968 | { |
2969 | adev->ras_hw_enabled = adev->ras_enabled = 0; |
2970 | |
2971 | if (!amdgpu_ras_asic_supported(adev)) |
2972 | return; |
2973 | |
2974 | /* query ras capability from psp */ |
	if (amdgpu_psp_get_ras_capability(&adev->psp))
2976 | goto init_ras_enabled_flag; |
2977 | |
	/* query ras capability from bios */
2979 | if (!adev->gmc.xgmi.connected_to_cpu && !adev->gmc.is_app_apu) { |
2980 | amdgpu_ras_query_ras_capablity_from_vbios(adev); |
2981 | } else { |
		/* the driver only manages the RAS features of a few IP blocks
		 * when the GPU is connected to the CPU through XGMI */
2984 | adev->ras_hw_enabled |= (1 << AMDGPU_RAS_BLOCK__GFX | |
2985 | 1 << AMDGPU_RAS_BLOCK__SDMA | |
2986 | 1 << AMDGPU_RAS_BLOCK__MMHUB); |
2987 | } |
2988 | |
2989 | /* apply asic specific settings (vega20 only for now) */ |
2990 | amdgpu_ras_get_quirks(adev); |
2991 | |
2992 | /* query poison mode from umc/df ip callback */ |
2993 | amdgpu_ras_query_poison_mode(adev); |
2994 | |
2995 | init_ras_enabled_flag: |
2996 | /* hw_supported needs to be aligned with RAS block mask. */ |
2997 | adev->ras_hw_enabled &= AMDGPU_RAS_BLOCK_MASK; |
2998 | |
2999 | adev->ras_enabled = amdgpu_ras_enable == 0 ? 0 : |
3000 | adev->ras_hw_enabled & amdgpu_ras_mask; |
3001 | |
3002 | /* aca is disabled by default */ |
3003 | adev->aca.is_enabled = false; |
3004 | } |
3005 | |
3006 | static void amdgpu_ras_counte_dw(struct work_struct *work) |
3007 | { |
3008 | struct amdgpu_ras *con = container_of(work, struct amdgpu_ras, |
3009 | ras_counte_delay_work.work); |
3010 | struct amdgpu_device *adev = con->adev; |
3011 | struct drm_device *dev = adev_to_drm(adev); |
3012 | unsigned long ce_count, ue_count; |
3013 | int res; |
3014 | |
	res = pm_runtime_get_sync(dev->dev);
	if (res < 0)
		goto Out;

	/* Cache new values.
	 */
	if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, NULL) == 0) {
		atomic_set(&con->ras_ce_count, ce_count);
		atomic_set(&con->ras_ue_count, ue_count);
	}

	pm_runtime_mark_last_busy(dev->dev);
Out:
	pm_runtime_put_autosuspend(dev->dev);
3029 | } |
3030 | |
3031 | static int amdgpu_get_ras_schema(struct amdgpu_device *adev) |
3032 | { |
	return (amdgpu_ras_is_poison_mode_supported(adev) ? AMDGPU_RAS_ERROR__POISON : 0) |
	       AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE |
	       AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE |
	       AMDGPU_RAS_ERROR__PARITY;
3037 | } |
3038 | |
3039 | int amdgpu_ras_init(struct amdgpu_device *adev) |
3040 | { |
3041 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3042 | int r; |
3043 | |
3044 | if (con) |
3045 | return 0; |
3046 | |
	con = kzalloc(sizeof(*con) +
3048 | sizeof(struct ras_manager) * AMDGPU_RAS_BLOCK_COUNT + |
3049 | sizeof(struct ras_manager) * AMDGPU_RAS_MCA_BLOCK_COUNT, |
3050 | GFP_KERNEL); |
3051 | if (!con) |
3052 | return -ENOMEM; |
3053 | |
3054 | con->adev = adev; |
3055 | INIT_DELAYED_WORK(&con->ras_counte_delay_work, amdgpu_ras_counte_dw); |
	atomic_set(&con->ras_ce_count, 0);
	atomic_set(&con->ras_ue_count, 0);
3058 | |
3059 | con->objs = (struct ras_manager *)(con + 1); |
3060 | |
	amdgpu_ras_set_context(adev, con);
3062 | |
3063 | amdgpu_ras_check_supported(adev); |
3064 | |
3065 | if (!adev->ras_enabled || adev->asic_type == CHIP_VEGA10) { |
		/* set the gfx block ras context feature for VEGA20 Gaming;
		 * send the ras disable cmd to the ras ta during ras late init.
		 */
3069 | if (!adev->ras_enabled && adev->asic_type == CHIP_VEGA20) { |
3070 | con->features |= BIT(AMDGPU_RAS_BLOCK__GFX); |
3071 | |
3072 | return 0; |
3073 | } |
3074 | |
3075 | r = 0; |
3076 | goto release_con; |
3077 | } |
3078 | |
3079 | con->update_channel_flag = false; |
3080 | con->features = 0; |
3081 | con->schema = 0; |
	INIT_LIST_HEAD(&con->head);
	/* Might need to get this flag from vbios. */
	con->flags = RAS_DEFAULT_FLAGS;
3085 | |
3086 | /* initialize nbio ras function ahead of any other |
3087 | * ras functions so hardware fatal error interrupt |
3088 | * can be enabled as early as possible */ |
	switch (amdgpu_ip_version(adev, NBIO_HWIP, 0)) {
3090 | case IP_VERSION(7, 4, 0): |
3091 | case IP_VERSION(7, 4, 1): |
3092 | case IP_VERSION(7, 4, 4): |
3093 | if (!adev->gmc.xgmi.connected_to_cpu) |
3094 | adev->nbio.ras = &nbio_v7_4_ras; |
3095 | break; |
3096 | case IP_VERSION(4, 3, 0): |
		if (adev->ras_hw_enabled & (1 << AMDGPU_RAS_BLOCK__DF))
			/* unlike other generations of nbio ras,
			 * nbio v4_3 only supports the fatal error interrupt
			 * to inform software that DF is frozen due to a
			 * system fatal error event. The driver should not
			 * enable nbio ras in such a case. Instead,
			 * check DF RAS.
			 */
			adev->nbio.ras = &nbio_v4_3_ras;
3105 | break; |
3106 | case IP_VERSION(7, 9, 0): |
3107 | if (!adev->gmc.is_app_apu) |
3108 | adev->nbio.ras = &nbio_v7_9_ras; |
3109 | break; |
3110 | default: |
3111 | /* nbio ras is not available */ |
3112 | break; |
3113 | } |
3114 | |
3115 | /* nbio ras block needs to be enabled ahead of other ras blocks |
3116 | * to handle fatal error */ |
3117 | r = amdgpu_nbio_ras_sw_init(adev); |
3118 | if (r) |
3119 | return r; |
3120 | |
3121 | if (adev->nbio.ras && |
3122 | adev->nbio.ras->init_ras_controller_interrupt) { |
3123 | r = adev->nbio.ras->init_ras_controller_interrupt(adev); |
3124 | if (r) |
3125 | goto release_con; |
3126 | } |
3127 | |
3128 | if (adev->nbio.ras && |
3129 | adev->nbio.ras->init_ras_err_event_athub_interrupt) { |
3130 | r = adev->nbio.ras->init_ras_err_event_athub_interrupt(adev); |
3131 | if (r) |
3132 | goto release_con; |
3133 | } |
3134 | |
	/* Pack the socket_id into ras feature mask bits [31:29] */
3136 | if (adev->smuio.funcs && |
3137 | adev->smuio.funcs->get_socket_id) |
3138 | con->features |= ((adev->smuio.funcs->get_socket_id(adev)) << |
3139 | AMDGPU_RAS_FEATURES_SOCKETID_SHIFT); |
3140 | |
3141 | /* Get RAS schema for particular SOC */ |
3142 | con->schema = amdgpu_get_ras_schema(adev); |
3143 | |
3144 | if (amdgpu_ras_fs_init(adev)) { |
3145 | r = -EINVAL; |
3146 | goto release_con; |
3147 | } |
3148 | |
	dev_info(adev->dev, "RAS INFO: ras initialized successfully, "
		 "hardware ability[%x] ras_mask[%x]\n",
		 adev->ras_hw_enabled, adev->ras_enabled);
3152 | |
3153 | return 0; |
3154 | release_con: |
	amdgpu_ras_set_context(adev, NULL);
	kfree(con);
3157 | |
3158 | return r; |
3159 | } |
3160 | |
3161 | int amdgpu_persistent_edc_harvesting_supported(struct amdgpu_device *adev) |
3162 | { |
3163 | if (adev->gmc.xgmi.connected_to_cpu || |
3164 | adev->gmc.is_app_apu) |
3165 | return 1; |
3166 | return 0; |
3167 | } |
3168 | |
3169 | static int amdgpu_persistent_edc_harvesting(struct amdgpu_device *adev, |
3170 | struct ras_common_if *ras_block) |
3171 | { |
3172 | struct ras_query_if info = { |
3173 | .head = *ras_block, |
3174 | }; |
3175 | |
3176 | if (!amdgpu_persistent_edc_harvesting_supported(adev)) |
3177 | return 0; |
3178 | |
	if (amdgpu_ras_query_error_status(adev, &info) != 0)
		DRM_WARN("RAS init harvest failure");

	if (amdgpu_ras_reset_error_status(adev, ras_block->block) != 0)
		DRM_WARN("RAS init harvest reset failure");
3184 | |
3185 | return 0; |
3186 | } |
3187 | |
3188 | bool amdgpu_ras_is_poison_mode_supported(struct amdgpu_device *adev) |
3189 | { |
3190 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3191 | |
3192 | if (!con) |
3193 | return false; |
3194 | |
3195 | return con->poison_supported; |
3196 | } |
3197 | |
3198 | /* helper function to handle common stuff in ip late init phase */ |
3199 | int amdgpu_ras_block_late_init(struct amdgpu_device *adev, |
3200 | struct ras_common_if *ras_block) |
3201 | { |
3202 | struct amdgpu_ras_block_object *ras_obj = NULL; |
3203 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3204 | struct ras_query_if *query_info; |
3205 | unsigned long ue_count, ce_count; |
3206 | int r; |
3207 | |
3208 | /* disable RAS feature per IP block if it is not supported */ |
	if (!amdgpu_ras_is_supported(adev, ras_block->block)) {
		amdgpu_ras_feature_enable_on_boot(adev, ras_block, 0);
3211 | return 0; |
3212 | } |
3213 | |
	r = amdgpu_ras_feature_enable_on_boot(adev, ras_block, 1);
3215 | if (r) { |
3216 | if (adev->in_suspend || amdgpu_in_reset(adev)) { |
3217 | /* in resume phase, if fail to enable ras, |
3218 | * clean up all ras fs nodes, and disable ras */ |
3219 | goto cleanup; |
3220 | } else |
3221 | return r; |
3222 | } |
3223 | |
	/* check for errors on warm reset on edc persistent supported ASICs */
3225 | amdgpu_persistent_edc_harvesting(adev, ras_block); |
3226 | |
3227 | /* in resume phase, no need to create ras fs node */ |
3228 | if (adev->in_suspend || amdgpu_in_reset(adev)) |
3229 | return 0; |
3230 | |
3231 | ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm); |
3232 | if (ras_obj->ras_cb || (ras_obj->hw_ops && |
3233 | (ras_obj->hw_ops->query_poison_status || |
3234 | ras_obj->hw_ops->handle_poison_consumption))) { |
		r = amdgpu_ras_interrupt_add_handler(adev, ras_block);
3236 | if (r) |
3237 | goto cleanup; |
3238 | } |
3239 | |
3240 | if (ras_obj->hw_ops && |
3241 | (ras_obj->hw_ops->query_ras_error_count || |
3242 | ras_obj->hw_ops->query_ras_error_status)) { |
		r = amdgpu_ras_sysfs_create(adev, ras_block);
3244 | if (r) |
3245 | goto interrupt; |
3246 | |
		/* Those are the cached values at init.
		 */
		query_info = kzalloc(sizeof(*query_info), GFP_KERNEL);
		if (!query_info)
			return -ENOMEM;
		memcpy(&query_info->head, ras_block, sizeof(struct ras_common_if));

		if (amdgpu_ras_query_error_count(adev, &ce_count, &ue_count, query_info) == 0) {
			atomic_set(&con->ras_ce_count, ce_count);
			atomic_set(&con->ras_ue_count, ue_count);
		}

		kfree(query_info);
3260 | } |
3261 | |
3262 | return 0; |
3263 | |
3264 | interrupt: |
3265 | if (ras_obj->ras_cb) |
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
cleanup:
	amdgpu_ras_feature_enable(adev, ras_block, 0);
3269 | return r; |
3270 | } |
3271 | |
3272 | static int amdgpu_ras_block_late_init_default(struct amdgpu_device *adev, |
3273 | struct ras_common_if *ras_block) |
3274 | { |
3275 | return amdgpu_ras_block_late_init(adev, ras_block); |
3276 | } |
3277 | |
3278 | /* helper function to remove ras fs node and interrupt handler */ |
3279 | void amdgpu_ras_block_late_fini(struct amdgpu_device *adev, |
3280 | struct ras_common_if *ras_block) |
3281 | { |
	struct amdgpu_ras_block_object *ras_obj;

	if (!ras_block)
		return;

	amdgpu_ras_sysfs_remove(adev, ras_block);

	ras_obj = container_of(ras_block, struct amdgpu_ras_block_object, ras_comm);
	if (ras_obj->ras_cb)
		amdgpu_ras_interrupt_remove_handler(adev, ras_block);
3291 | } |
3292 | |
3293 | static void amdgpu_ras_block_late_fini_default(struct amdgpu_device *adev, |
3294 | struct ras_common_if *ras_block) |
3295 | { |
3296 | return amdgpu_ras_block_late_fini(adev, ras_block); |
3297 | } |
3298 | |
/* Do some init work after IP late init as a dependency.
 * It runs in the resume, GPU reset, and boot-up cases.
 */
3302 | void amdgpu_ras_resume(struct amdgpu_device *adev) |
3303 | { |
3304 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3305 | struct ras_manager *obj, *tmp; |
3306 | |
3307 | if (!adev->ras_enabled || !con) { |
3308 | /* clean ras context for VEGA20 Gaming after send ras disable cmd */ |
3309 | amdgpu_release_ras_context(adev); |
3310 | |
3311 | return; |
3312 | } |
3313 | |
	if (con->flags & AMDGPU_RAS_FLAG_INIT_BY_VBIOS) {
		/* Set up all other IPs which are not implemented. The tricky
		 * part is that an IP's actual RAS error type should be
		 * MULTI_UNCORRECTABLE, but since the driver does not handle
		 * it, ERROR_NONE makes sense anyway.
		 */
		amdgpu_ras_enable_all_features(adev, 1);

		/* We enable RAS on all hw_supported blocks, but the boot
		 * parameter might disable some of them, and one or more IPs
		 * may not be implemented yet. So we disable those on their
		 * behalf.
		 */
		list_for_each_entry_safe(obj, tmp, &con->head, node) {
			if (!amdgpu_ras_is_supported(adev, obj->head.block)) {
				amdgpu_ras_feature_enable(adev, &obj->head, 0);
				/* there should not be any reference left */
				WARN_ON(alive_obj(obj));
			}
		}
3333 | } |
3334 | } |
3335 | |
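/* Disable all enabled RAS features on suspend; if any feature bits are
 * still set afterwards, force-disable them with bypass set.
 */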
3336 | void amdgpu_ras_suspend(struct amdgpu_device *adev) |
3337 | { |
3338 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3339 | |
3340 | if (!adev->ras_enabled || !con) |
3341 | return; |
3342 | |
	amdgpu_ras_disable_all_features(adev, 0);
	/* Make sure all ras objects are disabled. */
	if (AMDGPU_RAS_GET_FEATURES(con->features))
		amdgpu_ras_disable_all_features(adev, 1);
3347 | } |
3348 | |
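/* RAS late init: skipped for SR-IOV guests. Initializes (or, across a
 * GPU reset, resets) the ACA backend when it is enabled, then runs each
 * registered block's ras_late_init callback, falling back to the
 * default helper when a block does not provide one.
 */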
3349 | int amdgpu_ras_late_init(struct amdgpu_device *adev) |
3350 | { |
3351 | struct amdgpu_ras_block_list *node, *tmp; |
3352 | struct amdgpu_ras_block_object *obj; |
3353 | int r; |
3354 | |
	/* Guest side doesn't need to init RAS features */
3356 | if (amdgpu_sriov_vf(adev)) |
3357 | return 0; |
3358 | |
3359 | if (amdgpu_aca_is_enabled(adev)) { |
3360 | if (amdgpu_in_reset(adev)) |
3361 | r = amdgpu_aca_reset(adev); |
3362 | else |
3363 | r = amdgpu_aca_init(adev); |
3364 | if (r) |
3365 | return r; |
3366 | |
		amdgpu_ras_set_aca_debug_mode(adev, false);
	} else {
		amdgpu_ras_set_mca_debug_mode(adev, false);
3370 | } |
3371 | |
3372 | list_for_each_entry_safe(node, tmp, &adev->ras_list, node) { |
3373 | obj = node->ras_obj; |
3374 | if (!obj) { |
			dev_warn(adev->dev, "Warning: abnormal ras list node.\n");
3376 | continue; |
3377 | } |
3378 | |
		if (!amdgpu_ras_is_supported(adev, obj->ras_comm.block))
3380 | continue; |
3381 | |
3382 | if (obj->ras_late_init) { |
3383 | r = obj->ras_late_init(adev, &obj->ras_comm); |
3384 | if (r) { |
				dev_err(adev->dev, "%s failed to execute ras_late_init! ret:%d\n",
3386 | obj->ras_comm.name, r); |
3387 | return r; |
3388 | } |
3389 | } else |
			amdgpu_ras_block_late_init_default(adev, &obj->ras_comm);
3391 | } |
3392 | |
3393 | return 0; |
3394 | } |
3395 | |
3396 | /* do some fini work before IP fini as dependence */ |
3397 | int amdgpu_ras_pre_fini(struct amdgpu_device *adev) |
3398 | { |
3399 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3400 | |
3401 | if (!adev->ras_enabled || !con) |
3402 | return 0; |
3403 | |
	/* Need to disable RAS on all IPs here before ip [hw/sw]fini */
	if (AMDGPU_RAS_GET_FEATURES(con->features))
		amdgpu_ras_disable_all_features(adev, 0);
3408 | amdgpu_ras_recovery_fini(adev); |
3409 | return 0; |
3410 | } |
3411 | |
3412 | int amdgpu_ras_fini(struct amdgpu_device *adev) |
3413 | { |
3414 | struct amdgpu_ras_block_list *ras_node, *tmp; |
3415 | struct amdgpu_ras_block_object *obj = NULL; |
3416 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3417 | |
3418 | if (!adev->ras_enabled || !con) |
3419 | return 0; |
3420 | |
3421 | list_for_each_entry_safe(ras_node, tmp, &adev->ras_list, node) { |
3422 | if (ras_node->ras_obj) { |
3423 | obj = ras_node->ras_obj; |
			if (amdgpu_ras_is_supported(adev, obj->ras_comm.block) &&
			    obj->ras_fini)
				obj->ras_fini(adev, &obj->ras_comm);
			else
				amdgpu_ras_block_late_fini_default(adev, &obj->ras_comm);
		}

		/* Clear ras blocks from ras_list and free ras block list node */
		list_del(&ras_node->node);
		kfree(ras_node);
3434 | } |
3435 | |
3436 | amdgpu_ras_fs_fini(adev); |
3437 | amdgpu_ras_interrupt_remove_all(adev); |
3438 | |
3439 | if (amdgpu_aca_is_enabled(adev)) |
3440 | amdgpu_aca_fini(adev); |
3441 | |
	WARN(AMDGPU_RAS_GET_FEATURES(con->features), "Feature mask is not cleared");

	if (AMDGPU_RAS_GET_FEATURES(con->features))
		amdgpu_ras_disable_all_features(adev, 0);

	cancel_delayed_work_sync(&con->ras_counte_delay_work);

	amdgpu_ras_set_context(adev, NULL);
	kfree(con);
3451 | |
3452 | return 0; |
3453 | } |
3454 | |
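/* Accessors for the per-device fatal error detected ("fed") flag kept
 * as an atomic in the RAS context.
 */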
3455 | bool amdgpu_ras_get_fed_status(struct amdgpu_device *adev) |
3456 | { |
3457 | struct amdgpu_ras *ras; |
3458 | |
3459 | ras = amdgpu_ras_get_context(adev); |
3460 | if (!ras) |
3461 | return false; |
3462 | |
	return atomic_read(&ras->fed);
3464 | } |
3465 | |
3466 | void amdgpu_ras_set_fed(struct amdgpu_device *adev, bool status) |
3467 | { |
3468 | struct amdgpu_ras *ras; |
3469 | |
3470 | ras = amdgpu_ras_get_context(adev); |
3471 | if (ras) |
		atomic_set(&ras->fed, !!status);
3473 | } |
3474 | |
3475 | void amdgpu_ras_global_ras_isr(struct amdgpu_device *adev) |
3476 | { |
	if (atomic_cmpxchg(&amdgpu_ras_in_intr, 0, 1) == 0) {
		struct amdgpu_ras *ras = amdgpu_ras_get_context(adev);

		dev_info(adev->dev, "uncorrectable hardware error "
			"(ERREVENT_ATHUB_INTERRUPT) detected!\n");
3482 | |
3483 | ras->gpu_reset_flags |= AMDGPU_RAS_GPU_RESET_MODE1_RESET; |
3484 | amdgpu_ras_reset_gpu(adev); |
3485 | } |
3486 | } |
3487 | |
3488 | bool amdgpu_ras_need_emergency_restart(struct amdgpu_device *adev) |
3489 | { |
3490 | if (adev->asic_type == CHIP_VEGA20 && |
3491 | adev->pm.fw_version <= 0x283400) { |
3492 | return !(amdgpu_asic_reset_method(adev) == AMD_RESET_METHOD_BACO) && |
3493 | amdgpu_ras_intr_triggered(); |
3494 | } |
3495 | |
3496 | return false; |
3497 | } |
3498 | |
3499 | void amdgpu_release_ras_context(struct amdgpu_device *adev) |
3500 | { |
3501 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3502 | |
3503 | if (!con) |
3504 | return; |
3505 | |
3506 | if (!adev->ras_enabled && con->features & BIT(AMDGPU_RAS_BLOCK__GFX)) { |
3507 | con->features &= ~BIT(AMDGPU_RAS_BLOCK__GFX); |
3508 | amdgpu_ras_set_context(adev, NULL); |
		kfree(con);
3510 | } |
3511 | } |
3512 | |
3513 | #ifdef CONFIG_X86_MCE_AMD |
3514 | static struct amdgpu_device *find_adev(uint32_t node_id) |
3515 | { |
3516 | int i; |
3517 | struct amdgpu_device *adev = NULL; |
3518 | |
3519 | for (i = 0; i < mce_adev_list.num_gpu; i++) { |
3520 | adev = mce_adev_list.devs[i]; |
3521 | |
3522 | if (adev && adev->gmc.xgmi.connected_to_cpu && |
3523 | adev->gmc.xgmi.physical_node_id == node_id) |
3524 | break; |
3525 | adev = NULL; |
3526 | } |
3527 | |
3528 | return adev; |
3529 | } |
3530 | |
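/* Decode the GPU id, UMC instance and channel index packed into the
 * MCA_IPID register value reported with the MCE.
 */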
3531 | #define GET_MCA_IPID_GPUID(m) (((m) >> 44) & 0xF) |
3532 | #define GET_UMC_INST(m) (((m) >> 21) & 0x7) |
3533 | #define GET_CHAN_INDEX(m) ((((m) >> 12) & 0x3) | (((m) >> 18) & 0x4)) |
3534 | #define GPU_ID_OFFSET 8 |
3535 | |
3536 | static int amdgpu_bad_page_notifier(struct notifier_block *nb, |
3537 | unsigned long val, void *data) |
3538 | { |
3539 | struct mce *m = (struct mce *)data; |
3540 | struct amdgpu_device *adev = NULL; |
3541 | uint32_t gpu_id = 0; |
3542 | uint32_t umc_inst = 0, ch_inst = 0; |
3543 | |
3544 | /* |
3545 | * If the error was generated in UMC_V2, which belongs to GPU UMCs, |
3546 | * and error occurred in DramECC (Extended error code = 0) then only |
3547 | * process the error, else bail out. |
3548 | */ |
3549 | if (!m || !((smca_get_bank_type(cpu: m->extcpu, bank: m->bank) == SMCA_UMC_V2) && |
3550 | (XEC(m->status, 0x3f) == 0x0))) |
3551 | return NOTIFY_DONE; |
3552 | |
3553 | /* |
3554 | * If it is correctable error, return. |
3555 | */ |
3556 | if (mce_is_correctable(m)) |
3557 | return NOTIFY_OK; |
3558 | |
3559 | /* |
3560 | * GPU Id is offset by GPU_ID_OFFSET in MCA_IPID_UMC register. |
3561 | */ |
3562 | gpu_id = GET_MCA_IPID_GPUID(m->ipid) - GPU_ID_OFFSET; |
3563 | |
	adev = find_adev(gpu_id);
3565 | if (!adev) { |
3566 | DRM_WARN("%s: Unable to find adev for gpu_id: %d\n" , __func__, |
3567 | gpu_id); |
3568 | return NOTIFY_DONE; |
3569 | } |
3570 | |
3571 | /* |
3572 | * If it is uncorrectable error, then find out UMC instance and |
3573 | * channel index. |
3574 | */ |
3575 | umc_inst = GET_UMC_INST(m->ipid); |
3576 | ch_inst = GET_CHAN_INDEX(m->ipid); |
3577 | |
3578 | dev_info(adev->dev, "Uncorrectable error detected in UMC inst: %d, chan_idx: %d" , |
3579 | umc_inst, ch_inst); |
3580 | |
	if (!amdgpu_umc_page_retirement_mca(adev, m->addr, ch_inst, umc_inst))
3582 | return NOTIFY_OK; |
3583 | else |
3584 | return NOTIFY_DONE; |
3585 | } |
3586 | |
3587 | static struct notifier_block amdgpu_bad_page_nb = { |
3588 | .notifier_call = amdgpu_bad_page_notifier, |
3589 | .priority = MCE_PRIO_UC, |
3590 | }; |
3591 | |
3592 | static void amdgpu_register_bad_pages_mca_notifier(struct amdgpu_device *adev) |
3593 | { |
3594 | /* |
3595 | * Add the adev to the mce_adev_list. |
3596 | * During mode2 reset, amdgpu device is temporarily |
3597 | * removed from the mgpu_info list which can cause |
3598 | * page retirement to fail. |
3599 | * Use this list instead of mgpu_info to find the amdgpu |
3600 | * device on which the UMC error was reported. |
3601 | */ |
3602 | mce_adev_list.devs[mce_adev_list.num_gpu++] = adev; |
3603 | |
3604 | /* |
3605 | * Register the x86 notifier only once |
3606 | * with MCE subsystem. |
3607 | */ |
	if (!notifier_registered) {
		mce_register_decode_chain(&amdgpu_bad_page_nb);
3610 | notifier_registered = true; |
3611 | } |
3612 | } |
3613 | #endif |
3614 | |
3615 | struct amdgpu_ras *amdgpu_ras_get_context(struct amdgpu_device *adev) |
3616 | { |
3617 | if (!adev) |
3618 | return NULL; |
3619 | |
3620 | return adev->psp.ras_context.ras; |
3621 | } |
3622 | |
3623 | int amdgpu_ras_set_context(struct amdgpu_device *adev, struct amdgpu_ras *ras_con) |
3624 | { |
3625 | if (!adev) |
3626 | return -EINVAL; |
3627 | |
3628 | adev->psp.ras_context.ras = ras_con; |
3629 | return 0; |
3630 | } |
3631 | |
3632 | /* check if ras is supported on block, say, sdma, gfx */ |
3633 | int amdgpu_ras_is_supported(struct amdgpu_device *adev, |
3634 | unsigned int block) |
3635 | { |
3636 | int ret = 0; |
3637 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
3638 | |
3639 | if (block >= AMDGPU_RAS_BLOCK_COUNT) |
3640 | return 0; |
3641 | |
3642 | ret = ras && (adev->ras_enabled & (1 << block)); |
3643 | |
3644 | /* For the special asic with mem ecc enabled but sram ecc |
3645 | * not enabled, even if the ras block is not supported on |
3646 | * .ras_enabled, if the asic supports poison mode and the |
3647 | * ras block has ras configuration, it can be considered |
3648 | * that the ras block supports ras function. |
3649 | */ |
3650 | if (!ret && |
3651 | (block == AMDGPU_RAS_BLOCK__GFX || |
3652 | block == AMDGPU_RAS_BLOCK__SDMA || |
3653 | block == AMDGPU_RAS_BLOCK__VCN || |
3654 | block == AMDGPU_RAS_BLOCK__JPEG) && |
3655 | (amdgpu_ras_mask & (1 << block)) && |
3656 | amdgpu_ras_is_poison_mode_supported(adev) && |
	    amdgpu_ras_get_ras_block(adev, block, 0))
3658 | ret = 1; |
3659 | |
3660 | return ret; |
3661 | } |
3662 | |
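/* Schedule the RAS recovery work on the reset domain; the in_recovery
 * flag guards against scheduling it twice.
 */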
3663 | int amdgpu_ras_reset_gpu(struct amdgpu_device *adev) |
3664 | { |
3665 | struct amdgpu_ras *ras = amdgpu_ras_get_context(adev); |
3666 | |
	if (atomic_cmpxchg(&ras->in_recovery, 0, 1) == 0)
		amdgpu_reset_domain_schedule(ras->adev->reset_domain, &ras->recovery_work);
3669 | return 0; |
3670 | } |
3671 | |
3672 | int amdgpu_ras_set_mca_debug_mode(struct amdgpu_device *adev, bool enable) |
3673 | { |
3674 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3675 | int ret = 0; |
3676 | |
3677 | if (con) { |
3678 | ret = amdgpu_mca_smu_set_debug_mode(adev, enable); |
3679 | if (!ret) |
3680 | con->is_aca_debug_mode = enable; |
3681 | } |
3682 | |
3683 | return ret; |
3684 | } |
3685 | |
3686 | int amdgpu_ras_set_aca_debug_mode(struct amdgpu_device *adev, bool enable) |
3687 | { |
3688 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3689 | int ret = 0; |
3690 | |
3691 | if (con) { |
3692 | if (amdgpu_aca_is_enabled(adev)) |
			ret = amdgpu_aca_smu_set_debug_mode(adev, enable);
3694 | else |
3695 | ret = amdgpu_mca_smu_set_debug_mode(adev, enable); |
3696 | if (!ret) |
3697 | con->is_aca_debug_mode = enable; |
3698 | } |
3699 | |
3700 | return ret; |
3701 | } |
3702 | |
3703 | bool amdgpu_ras_get_aca_debug_mode(struct amdgpu_device *adev) |
3704 | { |
3705 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3706 | const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; |
3707 | const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; |
3708 | |
3709 | if (!con) |
3710 | return false; |
3711 | |
3712 | if ((amdgpu_aca_is_enabled(adev) && smu_funcs && smu_funcs->set_debug_mode) || |
3713 | (!amdgpu_aca_is_enabled(adev) && mca_funcs && mca_funcs->mca_set_debug_mode)) |
3714 | return con->is_aca_debug_mode; |
3715 | else |
3716 | return true; |
3717 | } |
3718 | |
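/* Select how RAS errors are queried: through firmware when an MCA/ACA
 * SMU backend with debug-mode support is present and debug mode is
 * disabled, otherwise directly from the error registers.
 */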
3719 | bool amdgpu_ras_get_error_query_mode(struct amdgpu_device *adev, |
3720 | unsigned int *error_query_mode) |
3721 | { |
3722 | struct amdgpu_ras *con = amdgpu_ras_get_context(adev); |
3723 | const struct amdgpu_mca_smu_funcs *mca_funcs = adev->mca.mca_funcs; |
3724 | const struct aca_smu_funcs *smu_funcs = adev->aca.smu_funcs; |
3725 | |
3726 | if (!con) { |
3727 | *error_query_mode = AMDGPU_RAS_INVALID_ERROR_QUERY; |
3728 | return false; |
3729 | } |
3730 | |
3731 | if ((smu_funcs && smu_funcs->set_debug_mode) || (mca_funcs && mca_funcs->mca_set_debug_mode)) |
3732 | *error_query_mode = |
3733 | (con->is_aca_debug_mode) ? AMDGPU_RAS_DIRECT_ERROR_QUERY : AMDGPU_RAS_FIRMWARE_ERROR_QUERY; |
3734 | else |
3735 | *error_query_mode = AMDGPU_RAS_DIRECT_ERROR_QUERY; |
3736 | |
3737 | return true; |
3738 | } |
3739 | |
3740 | /* Register each ip ras block into amdgpu ras */ |
3741 | int amdgpu_ras_register_ras_block(struct amdgpu_device *adev, |
3742 | struct amdgpu_ras_block_object *ras_block_obj) |
3743 | { |
	struct amdgpu_ras_block_list *ras_node;

	if (!adev || !ras_block_obj)
3746 | return -EINVAL; |
3747 | |
	ras_node = kzalloc(sizeof(*ras_node), GFP_KERNEL);
	if (!ras_node)
		return -ENOMEM;

	INIT_LIST_HEAD(&ras_node->node);
	ras_node->ras_obj = ras_block_obj;
	list_add_tail(&ras_node->node, &adev->ras_list);
3755 | |
3756 | return 0; |
3757 | } |
3758 | |
3759 | void amdgpu_ras_get_error_type_name(uint32_t err_type, char *err_type_name) |
3760 | { |
3761 | if (!err_type_name) |
3762 | return; |
3763 | |
3764 | switch (err_type) { |
3765 | case AMDGPU_RAS_ERROR__SINGLE_CORRECTABLE: |
		sprintf(err_type_name, "correctable");
		break;
	case AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE:
		sprintf(err_type_name, "uncorrectable");
		break;
	default:
		sprintf(err_type_name, "unknown");
3773 | break; |
3774 | } |
3775 | } |
3776 | |
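/* Read the low error-status register of one instance and extract the
 * memory id; returns false if the entry requires a valid flag that the
 * hardware did not set.
 */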
3777 | bool amdgpu_ras_inst_get_memory_id_field(struct amdgpu_device *adev, |
3778 | const struct amdgpu_ras_err_status_reg_entry *reg_entry, |
3779 | uint32_t instance, |
3780 | uint32_t *memory_id) |
3781 | { |
3782 | uint32_t err_status_lo_data, err_status_lo_offset; |
3783 | |
3784 | if (!reg_entry) |
3785 | return false; |
3786 | |
3787 | err_status_lo_offset = |
3788 | AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, |
3789 | reg_entry->seg_lo, reg_entry->reg_lo); |
3790 | err_status_lo_data = RREG32(err_status_lo_offset); |
3791 | |
3792 | if ((reg_entry->flags & AMDGPU_RAS_ERR_STATUS_VALID) && |
3793 | !REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, ERR_STATUS_VALID_FLAG)) |
3794 | return false; |
3795 | |
3796 | *memory_id = REG_GET_FIELD(err_status_lo_data, ERR_STATUS_LO, MEMORY_ID); |
3797 | |
3798 | return true; |
3799 | } |
3800 | |
3801 | bool amdgpu_ras_inst_get_err_cnt_field(struct amdgpu_device *adev, |
3802 | const struct amdgpu_ras_err_status_reg_entry *reg_entry, |
3803 | uint32_t instance, |
3804 | unsigned long *err_cnt) |
3805 | { |
3806 | uint32_t err_status_hi_data, err_status_hi_offset; |
3807 | |
3808 | if (!reg_entry) |
3809 | return false; |
3810 | |
3811 | err_status_hi_offset = |
3812 | AMDGPU_RAS_REG_ENTRY_OFFSET(reg_entry->hwip, instance, |
3813 | reg_entry->seg_hi, reg_entry->reg_hi); |
3814 | err_status_hi_data = RREG32(err_status_hi_offset); |
3815 | |
3816 | if ((reg_entry->flags & AMDGPU_RAS_ERR_INFO_VALID) && |
3817 | !REG_GET_FIELD(err_status_hi_data, ERR_STATUS_HI, ERR_INFO_VALID_FLAG)) |
3818 | /* keep the check here in case we need to refer to the result later */ |
		dev_dbg(adev->dev, "Invalid err_info field\n");
3820 | |
3821 | /* read err count */ |
3822 | *err_cnt = REG_GET_FIELD(err_status_hi_data, ERR_STATUS, ERR_CNT); |
3823 | |
3824 | return true; |
3825 | } |
3826 | |
3827 | void amdgpu_ras_inst_query_ras_error_count(struct amdgpu_device *adev, |
3828 | const struct amdgpu_ras_err_status_reg_entry *reg_list, |
3829 | uint32_t reg_list_size, |
3830 | const struct amdgpu_ras_memory_id_entry *mem_list, |
3831 | uint32_t mem_list_size, |
3832 | uint32_t instance, |
3833 | uint32_t err_type, |
3834 | unsigned long *err_count) |
3835 | { |
3836 | uint32_t memory_id; |
3837 | unsigned long err_cnt; |
3838 | char err_type_name[16]; |
3839 | uint32_t i, j; |
3840 | |
3841 | for (i = 0; i < reg_list_size; i++) { |
3842 | /* query memory_id from err_status_lo */ |
		if (!amdgpu_ras_inst_get_memory_id_field(adev, &reg_list[i],
							 instance, &memory_id))
			continue;

		/* query err_cnt from err_status_hi */
		if (!amdgpu_ras_inst_get_err_cnt_field(adev, &reg_list[i],
						       instance, &err_cnt) ||
		    !err_cnt)
3851 | continue; |
3852 | |
3853 | *err_count += err_cnt; |
3854 | |
3855 | /* log the errors */ |
3856 | amdgpu_ras_get_error_type_name(err_type, err_type_name); |
3857 | if (!mem_list) { |
3858 | /* memory_list is not supported */ |
3859 | dev_info(adev->dev, |
3860 | "%ld %s hardware errors detected in %s, instance: %d, memory_id: %d\n" , |
3861 | err_cnt, err_type_name, |
3862 | reg_list[i].block_name, |
3863 | instance, memory_id); |
3864 | } else { |
3865 | for (j = 0; j < mem_list_size; j++) { |
3866 | if (memory_id == mem_list[j].memory_id) { |
3867 | dev_info(adev->dev, |
3868 | "%ld %s hardware errors detected in %s, instance: %d, memory block: %s\n" , |
3869 | err_cnt, err_type_name, |
3870 | reg_list[i].block_name, |
3871 | instance, mem_list[j].name); |
3872 | break; |
3873 | } |
3874 | } |
3875 | } |
3876 | } |
3877 | } |
3878 | |
3879 | void amdgpu_ras_inst_reset_ras_error_count(struct amdgpu_device *adev, |
3880 | const struct amdgpu_ras_err_status_reg_entry *reg_list, |
3881 | uint32_t reg_list_size, |
3882 | uint32_t instance) |
3883 | { |
3884 | uint32_t err_status_lo_offset, err_status_hi_offset; |
3885 | uint32_t i; |
3886 | |
3887 | for (i = 0; i < reg_list_size; i++) { |
3888 | err_status_lo_offset = |
3889 | AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, |
3890 | reg_list[i].seg_lo, reg_list[i].reg_lo); |
3891 | err_status_hi_offset = |
3892 | AMDGPU_RAS_REG_ENTRY_OFFSET(reg_list[i].hwip, instance, |
3893 | reg_list[i].seg_hi, reg_list[i].reg_hi); |
3894 | WREG32(err_status_lo_offset, 0); |
3895 | WREG32(err_status_hi_offset, 0); |
3896 | } |
3897 | } |
3898 | |
3899 | int amdgpu_ras_error_data_init(struct ras_err_data *err_data) |
3900 | { |
3901 | memset(err_data, 0, sizeof(*err_data)); |
3902 | |
	INIT_LIST_HEAD(&err_data->err_node_list);
3904 | |
3905 | return 0; |
3906 | } |
3907 | |
3908 | static void amdgpu_ras_error_node_release(struct ras_err_node *err_node) |
3909 | { |
3910 | if (!err_node) |
3911 | return; |
3912 | |
	list_del(&err_node->node);
	kvfree(err_node);
3915 | } |
3916 | |
3917 | void amdgpu_ras_error_data_fini(struct ras_err_data *err_data) |
3918 | { |
3919 | struct ras_err_node *err_node, *tmp; |
3920 | |
3921 | list_for_each_entry_safe(err_node, tmp, &err_data->err_node_list, node) |
3922 | amdgpu_ras_error_node_release(err_node); |
3923 | } |
3924 | |
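/* Look up the error node matching a (socket_id, die_id) pair, if any. */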
3925 | static struct ras_err_node *amdgpu_ras_error_find_node_by_id(struct ras_err_data *err_data, |
3926 | struct amdgpu_smuio_mcm_config_info *mcm_info) |
3927 | { |
3928 | struct ras_err_node *err_node; |
3929 | struct amdgpu_smuio_mcm_config_info *ref_id; |
3930 | |
3931 | if (!err_data || !mcm_info) |
3932 | return NULL; |
3933 | |
3934 | for_each_ras_error(err_node, err_data) { |
3935 | ref_id = &err_node->err_info.mcm_info; |
3936 | |
3937 | if (mcm_info->socket_id == ref_id->socket_id && |
3938 | mcm_info->die_id == ref_id->die_id) |
3939 | return err_node; |
3940 | } |
3941 | |
3942 | return NULL; |
3943 | } |
3944 | |
3945 | static struct ras_err_node *amdgpu_ras_error_node_new(void) |
3946 | { |
3947 | struct ras_err_node *err_node; |
3948 | |
	err_node = kvzalloc(sizeof(*err_node), GFP_KERNEL);
3950 | if (!err_node) |
3951 | return NULL; |
3952 | |
	INIT_LIST_HEAD(&err_node->node);
3954 | |
3955 | return err_node; |
3956 | } |
3957 | |
3958 | static int ras_err_info_cmp(void *priv, const struct list_head *a, const struct list_head *b) |
3959 | { |
3960 | struct ras_err_node *nodea = container_of(a, struct ras_err_node, node); |
3961 | struct ras_err_node *nodeb = container_of(b, struct ras_err_node, node); |
3962 | struct amdgpu_smuio_mcm_config_info *infoa = &nodea->err_info.mcm_info; |
3963 | struct amdgpu_smuio_mcm_config_info *infob = &nodeb->err_info.mcm_info; |
3964 | |
	if (unlikely(infoa->socket_id != infob->socket_id))
		return infoa->socket_id - infob->socket_id;

	return infoa->die_id - infob->die_id;
3971 | } |
3972 | |
3973 | static struct ras_err_info *amdgpu_ras_error_get_info(struct ras_err_data *err_data, |
3974 | struct amdgpu_smuio_mcm_config_info *mcm_info) |
3975 | { |
3976 | struct ras_err_node *err_node; |
3977 | |
3978 | err_node = amdgpu_ras_error_find_node_by_id(err_data, mcm_info); |
3979 | if (err_node) |
3980 | return &err_node->err_info; |
3981 | |
3982 | err_node = amdgpu_ras_error_node_new(); |
3983 | if (!err_node) |
3984 | return NULL; |
3985 | |
	INIT_LIST_HEAD(&err_node->err_info.err_addr_list);

	memcpy(&err_node->err_info.mcm_info, mcm_info, sizeof(*mcm_info));

	err_data->err_list_count++;
	list_add_tail(&err_node->node, &err_data->err_node_list);
	list_sort(NULL, &err_data->err_node_list, ras_err_info_cmp);
3993 | |
3994 | return &err_node->err_info; |
3995 | } |
3996 | |
3997 | void amdgpu_ras_add_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *err_addr) |
3998 | { |
3999 | struct ras_err_addr *mca_err_addr; |
4000 | |
	mca_err_addr = kzalloc(sizeof(*mca_err_addr), GFP_KERNEL);
	if (!mca_err_addr)
		return;

	INIT_LIST_HEAD(&mca_err_addr->node);

	mca_err_addr->err_status = err_addr->err_status;
	mca_err_addr->err_ipid = err_addr->err_ipid;
	mca_err_addr->err_addr = err_addr->err_addr;

	list_add_tail(&mca_err_addr->node, &err_info->err_addr_list);
4012 | } |
4013 | |
4014 | void amdgpu_ras_del_mca_err_addr(struct ras_err_info *err_info, struct ras_err_addr *mca_err_addr) |
4015 | { |
	list_del(&mca_err_addr->node);
	kfree(mca_err_addr);
4018 | } |
4019 | |
4020 | int amdgpu_ras_error_statistic_ue_count(struct ras_err_data *err_data, |
4021 | struct amdgpu_smuio_mcm_config_info *mcm_info, |
4022 | struct ras_err_addr *err_addr, u64 count) |
4023 | { |
4024 | struct ras_err_info *err_info; |
4025 | |
4026 | if (!err_data || !mcm_info) |
4027 | return -EINVAL; |
4028 | |
4029 | if (!count) |
4030 | return 0; |
4031 | |
4032 | err_info = amdgpu_ras_error_get_info(err_data, mcm_info); |
4033 | if (!err_info) |
4034 | return -EINVAL; |
4035 | |
4036 | if (err_addr && err_addr->err_status) |
4037 | amdgpu_ras_add_mca_err_addr(err_info, err_addr); |
4038 | |
4039 | err_info->ue_count += count; |
4040 | err_data->ue_count += count; |
4041 | |
4042 | return 0; |
4043 | } |
4044 | |
4045 | int amdgpu_ras_error_statistic_ce_count(struct ras_err_data *err_data, |
4046 | struct amdgpu_smuio_mcm_config_info *mcm_info, |
4047 | struct ras_err_addr *err_addr, u64 count) |
4048 | { |
4049 | struct ras_err_info *err_info; |
4050 | |
4051 | if (!err_data || !mcm_info) |
4052 | return -EINVAL; |
4053 | |
4054 | if (!count) |
4055 | return 0; |
4056 | |
4057 | err_info = amdgpu_ras_error_get_info(err_data, mcm_info); |
4058 | if (!err_info) |
4059 | return -EINVAL; |
4060 | |
4061 | err_info->ce_count += count; |
4062 | err_data->ce_count += count; |
4063 | |
4064 | return 0; |
4065 | } |
4066 | |
4067 | int amdgpu_ras_error_statistic_de_count(struct ras_err_data *err_data, |
4068 | struct amdgpu_smuio_mcm_config_info *mcm_info, |
4069 | struct ras_err_addr *err_addr, u64 count) |
4070 | { |
4071 | struct ras_err_info *err_info; |
4072 | |
4073 | if (!err_data || !mcm_info) |
4074 | return -EINVAL; |
4075 | |
4076 | if (!count) |
4077 | return 0; |
4078 | |
4079 | err_info = amdgpu_ras_error_get_info(err_data, mcm_info); |
4080 | if (!err_info) |
4081 | return -EINVAL; |
4082 | |
4083 | if (err_addr && err_addr->err_status) |
4084 | amdgpu_ras_add_mca_err_addr(err_info, err_addr); |
4085 | |
4086 | err_info->de_count += count; |
4087 | err_data->de_count += count; |
4088 | |
4089 | return 0; |
4090 | } |
4091 | |
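/* Boot-time error reporting: MP0 scratch registers C2PMSG_92/126 expose
 * the firmware boot status; on failure the encoded boot error carries
 * the socket, AID and HBM ids of the failing stage.
 */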
4092 | #define mmMP0_SMN_C2PMSG_92 0x1609C |
4093 | #define mmMP0_SMN_C2PMSG_126 0x160BE |
4094 | static void amdgpu_ras_boot_time_error_reporting(struct amdgpu_device *adev, |
4095 | u32 instance, u32 boot_error) |
4096 | { |
4097 | u32 socket_id, aid_id, hbm_id; |
4098 | u32 reg_data; |
4099 | u64 reg_addr; |
4100 | |
4101 | socket_id = AMDGPU_RAS_GPU_ERR_SOCKET_ID(boot_error); |
4102 | aid_id = AMDGPU_RAS_GPU_ERR_AID_ID(boot_error); |
4103 | hbm_id = AMDGPU_RAS_GPU_ERR_HBM_ID(boot_error); |
4104 | |
	/* The pattern for SMN addressing in other SOCs could be different
	 * from the one for aqua_vanjaram. We should revisit the code if
	 * the pattern changes. In that case, replace the aqua_vanjaram
	 * implementation with a more common helper.
	 */
	reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);

	reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr);
	dev_err(adev->dev, "socket: %d, aid: %d, firmware boot failed, fw status is 0x%x\n",
		socket_id, aid_id, reg_data);
4115 | |
	if (AMDGPU_RAS_GPU_ERR_MEM_TRAINING(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, memory training failed\n",
			 socket_id, aid_id, hbm_id);

	if (AMDGPU_RAS_GPU_ERR_FW_LOAD(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, firmware load failed at boot time\n",
			 socket_id, aid_id);

	if (AMDGPU_RAS_GPU_ERR_WAFL_LINK_TRAINING(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, wafl link training failed\n",
			 socket_id, aid_id);

	if (AMDGPU_RAS_GPU_ERR_XGMI_LINK_TRAINING(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, xgmi link training failed\n",
			 socket_id, aid_id);

	if (AMDGPU_RAS_GPU_ERR_USR_CP_LINK_TRAINING(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, usr cp link training failed\n",
			 socket_id, aid_id);

	if (AMDGPU_RAS_GPU_ERR_USR_DP_LINK_TRAINING(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, usr dp link training failed\n",
			 socket_id, aid_id);

	if (AMDGPU_RAS_GPU_ERR_HBM_MEM_TEST(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm memory test failed\n",
			 socket_id, aid_id, hbm_id);

	if (AMDGPU_RAS_GPU_ERR_HBM_BIST_TEST(boot_error))
		dev_info(adev->dev, "socket: %d, aid: %d, hbm: %d, hbm bist test failed\n",
			 socket_id, aid_id, hbm_id);
4147 | } |
4148 | |
4149 | static int amdgpu_ras_wait_for_boot_complete(struct amdgpu_device *adev, |
4150 | u32 instance, u32 *boot_error) |
4151 | { |
4152 | u32 reg_addr; |
4153 | u32 reg_data; |
4154 | int retry_loop; |
4155 | |
4156 | reg_addr = (mmMP0_SMN_C2PMSG_92 << 2) + |
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4158 | |
4159 | for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) { |
4160 | reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); |
4161 | if ((reg_data & AMDGPU_RAS_BOOT_STATUS_MASK) == AMDGPU_RAS_BOOT_STEADY_STATUS) { |
4162 | *boot_error = AMDGPU_RAS_BOOT_SUCEESS; |
4163 | return 0; |
4164 | } |
		msleep(1);
4166 | } |
4167 | |
	/* The pattern for SMN addressing in other SOCs could be different
	 * from the one for aqua_vanjaram. We should revisit the code if
	 * the pattern changes. In that case, replace the aqua_vanjaram
	 * implementation with a more common helper.
	 */
	reg_addr = (mmMP0_SMN_C2PMSG_126 << 2) +
		   aqua_vanjaram_encode_ext_smn_addressing(instance);
4174 | |
4175 | for (retry_loop = 0; retry_loop < AMDGPU_RAS_BOOT_STATUS_POLLING_LIMIT; retry_loop++) { |
4176 | reg_data = amdgpu_device_indirect_rreg_ext(adev, reg_addr); |
4177 | if (AMDGPU_RAS_GPU_ERR_BOOT_STATUS(reg_data)) { |
4178 | *boot_error = reg_data; |
4179 | return 0; |
4180 | } |
		msleep(1);
4182 | } |
4183 | |
4184 | *boot_error = reg_data; |
4185 | return -ETIME; |
4186 | } |
4187 | |
4188 | void amdgpu_ras_query_boot_status(struct amdgpu_device *adev, u32 num_instances) |
4189 | { |
4190 | u32 boot_error = 0; |
4191 | u32 i; |
4192 | |
4193 | for (i = 0; i < num_instances; i++) { |
		if (amdgpu_ras_wait_for_boot_complete(adev, i, &boot_error))
			amdgpu_ras_boot_time_error_reporting(adev, i, boot_error);
4196 | } |
4197 | } |
4198 | |