// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2016-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "kfd_priv.h"
#include "kfd_events.h"
#include "kfd_debug.h"
#include "soc15_int.h"
#include "kfd_device_queue_manager.h"
#include "kfd_smi_events.h"

/*
 * GFX9 SQ Interrupts
 *
 * There are 3 encoding types of interrupts sourced from SQ sent as a 44-bit
 * packet to the Interrupt Handler:
 * Auto - Generated by the SQG (various cmd overflows, timestamps etc)
 * Wave - Generated by S_SENDMSG through a shader program
 * Error - HW generated errors (Illegal instructions, Memviols, EDC etc)
 *
 * The 44-bit packet is mapped as {context_id1[7:0],context_id0[31:0]} plus
 * 4 bits for the VMID (SOC15_VMID_FROM_IH_ENTRY) as follows:
 *
 * - context_id0[27:26]
 *   Encoding type (0 = Auto, 1 = Wave, 2 = Error)
 *
 * - context_id0[13]
 *   PRIV bit indicates that the Wave S_SEND or error occurred within trap
 *
 * - {context_id1[7:0],context_id0[31:28],context_id0[11:0]}
 *   24-bit data with the following layout per encoding type:
 *   Auto - only context_id0[8:0] is used, which reports various interrupts
 *   generated by SQG. The rest is 0.
 *   Wave - user data sent from m0 via S_SENDMSG
 *   Error - Error type (context_id1[7:4]), Error Details (rest of bits)
 *
 * The other context_id bits show coordinates (SE/SH/CU/SIMD/WAVE) for wave
 * S_SENDMSG and Errors. These are 0 for Auto.
 */
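
/*
 * Worked example (hypothetical packet, for illustration only): with
 * context_id0 = 0x04202abc and context_id1 = 0x0000005d,
 *   ENCODING = context_id0[27:26] = 1  -> Wave (S_SENDMSG)
 *   PRIV     = context_id0[13]    = 1  -> sent from within the trap handler
 * and the 24-bit data word reassembles as
 *   {context_id1[7:0], context_id0[31:28], context_id0[11:0]}
 *   = {0x5d, 0x0, 0xabc} = 0x5d0abc,
 * which is what KFD_CONTEXT_ID_GET_SQ_INT_DATA() below computes.
 */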

enum SQ_INTERRUPT_WORD_ENCODING {
	SQ_INTERRUPT_WORD_ENCODING_AUTO = 0x0,
	SQ_INTERRUPT_WORD_ENCODING_INST,
	SQ_INTERRUPT_WORD_ENCODING_ERROR,
};

enum SQ_INTERRUPT_ERROR_TYPE {
	SQ_INTERRUPT_ERROR_TYPE_EDC_FUE = 0x0,
	SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST,
	SQ_INTERRUPT_ERROR_TYPE_MEMVIOL,
	SQ_INTERRUPT_ERROR_TYPE_EDC_FED,
};

/* SQ_INTERRUPT_WORD_AUTO_CTXID */
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE__SHIFT 0
#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT__SHIFT 1
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL__SHIFT 2
#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP__SHIFT 3
#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP__SHIFT 4
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW__SHIFT 5
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW__SHIFT 6
#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW__SHIFT 7
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR__SHIFT 8
#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID__SHIFT 24
#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING__SHIFT 26

#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_MASK 0x00000001
#define SQ_INTERRUPT_WORD_AUTO_CTXID__WLT_MASK 0x00000002
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_BUF_FULL_MASK 0x00000004
#define SQ_INTERRUPT_WORD_AUTO_CTXID__REG_TIMESTAMP_MASK 0x00000008
#define SQ_INTERRUPT_WORD_AUTO_CTXID__CMD_TIMESTAMP_MASK 0x00000010
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_CMD_OVERFLOW_MASK 0x00000020
#define SQ_INTERRUPT_WORD_AUTO_CTXID__HOST_REG_OVERFLOW_MASK 0x00000040
#define SQ_INTERRUPT_WORD_AUTO_CTXID__IMMED_OVERFLOW_MASK 0x00000080
#define SQ_INTERRUPT_WORD_AUTO_CTXID__THREAD_TRACE_UTC_ERROR_MASK 0x00000100
#define SQ_INTERRUPT_WORD_AUTO_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_AUTO_CTXID__ENCODING_MASK 0x0c000000

/* SQ_INTERRUPT_WORD_WAVE_CTXID */
#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA__SHIFT 0
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID__SHIFT 12
#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV__SHIFT 13
#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID__SHIFT 14
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID__SHIFT 18
#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID__SHIFT 20
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID__SHIFT 24
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING__SHIFT 26

#define SQ_INTERRUPT_WORD_WAVE_CTXID__DATA_MASK 0x00000fff
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SH_ID_MASK 0x00001000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK 0x00002000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__WAVE_ID_MASK 0x0003c000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SIMD_ID_MASK 0x000c0000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__CU_ID_MASK 0x00f00000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__SE_ID_MASK 0x03000000
#define SQ_INTERRUPT_WORD_WAVE_CTXID__ENCODING_MASK 0x0c000000

/* GFX9 SQ interrupt 24-bit data from context_id<0,1> */
#define KFD_CONTEXT_ID_GET_SQ_INT_DATA(ctx0, ctx1) \
	((ctx0 & 0xfff) | ((ctx0 >> 16) & 0xf000) | ((ctx1 << 16) & 0xff0000))

#define KFD_SQ_INT_DATA__ERR_TYPE_MASK 0xF00000
#define KFD_SQ_INT_DATA__ERR_TYPE__SHIFT 20

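/*
 * Worked example (hypothetical value, for illustration only): for an Error
 * encoding with sq_int_data = 0x200123, the error type is
 *   (0x200123 & KFD_SQ_INT_DATA__ERR_TYPE_MASK) >> 20 = 2,
 * i.e. SQ_INTERRUPT_ERROR_TYPE_MEMVIOL, and the remaining bits carry the
 * error details.
 */
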
/*
 * The debugger will send user data (m0) with PRIV=1 to indicate that it
 * requires notification from the KFD, carrying the queue id (DOORBELL_ID)
 * and trap code (TRAP_CODE).
 */
#define KFD_INT_DATA_DEBUG_DOORBELL_MASK 0x0003ff
#define KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT 10
#define KFD_INT_DATA_DEBUG_TRAP_CODE_MASK 0x07fc00
#define KFD_DEBUG_DOORBELL_ID(sq_int_data) ((sq_int_data) & \
		KFD_INT_DATA_DEBUG_DOORBELL_MASK)
#define KFD_DEBUG_TRAP_CODE(sq_int_data) (((sq_int_data) & \
		KFD_INT_DATA_DEBUG_TRAP_CODE_MASK) \
		>> KFD_INT_DATA_DEBUG_TRAP_CODE_SHIFT)
#define KFD_DEBUG_CP_BAD_OP_ECODE_MASK 0x3fffc00
#define KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT 10
#define KFD_DEBUG_CP_BAD_OP_ECODE(ctxid0) (((ctxid0) & \
		KFD_DEBUG_CP_BAD_OP_ECODE_MASK) \
		>> KFD_DEBUG_CP_BAD_OP_ECODE_SHIFT)
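
/*
 * Worked example (hypothetical data word, for illustration only): if the
 * 24-bit SQ interrupt data is 0x000c07 with PRIV=1, then
 *   KFD_DEBUG_DOORBELL_ID(0x000c07) = 0x000c07 & 0x3ff           = 0x7
 *   KFD_DEBUG_TRAP_CODE(0x000c07)   = (0x000c07 & 0x7fc00) >> 10 = 0x3
 * i.e. the debugger is notified for the queue behind doorbell 0x7 with
 * trap code 3.
 */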

static void event_interrupt_poison_consumption_v9(struct kfd_node *dev,
				uint16_t pasid, uint16_t client_id)
{
	enum amdgpu_ras_block block = 0;
	int old_poison, ret = -EINVAL;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);

	if (!p)
		return;

	/* All queues of the process will be unmapped at once */
	old_poison = atomic_cmpxchg(&p->poison, 0, 1);
	kfd_unref_process(p);
	if (old_poison)
		return;

	switch (client_id) {
	case SOC15_IH_CLIENTID_SE0SH:
	case SOC15_IH_CLIENTID_SE1SH:
	case SOC15_IH_CLIENTID_SE2SH:
	case SOC15_IH_CLIENTID_SE3SH:
	case SOC15_IH_CLIENTID_UTCL2:
		ret = kfd_dqm_evict_pasid(dev->dqm, pasid);
		block = AMDGPU_RAS_BLOCK__GFX;
		break;
	case SOC15_IH_CLIENTID_SDMA0:
	case SOC15_IH_CLIENTID_SDMA1:
	case SOC15_IH_CLIENTID_SDMA2:
	case SOC15_IH_CLIENTID_SDMA3:
	case SOC15_IH_CLIENTID_SDMA4:
		block = AMDGPU_RAS_BLOCK__SDMA;
		break;
	default:
		break;
	}

	kfd_signal_poison_consumed_event(dev, pasid);

	/* If resetting the queues succeeded, do page retirement without a
	 * GPU reset; if it failed, fall back to a full GPU reset.
	 */
	if (!ret) {
		dev_warn(dev->adev->dev,
			 "RAS poison consumption, unmap queue flow succeeded: client id %d\n",
			 client_id);
		amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, false);
	} else {
		dev_warn(dev->adev->dev,
			 "RAS poison consumption, fall back to gpu reset flow: client id %d\n",
			 client_id);
		amdgpu_amdkfd_ras_poison_consumption_handler(dev->adev, block, true);
	}
}

static bool context_id_expected(struct kfd_dev *dev)
{
	switch (KFD_GC_VERSION(dev)) {
	case IP_VERSION(9, 0, 1):
		return dev->mec_fw_version >= 0x817a;
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
		return dev->mec_fw_version >= 0x17a;
	default:
		/* Other GFXv9 and later GPUs always send valid context IDs
		 * on legitimate events
		 */
		return KFD_GC_VERSION(dev) >= IP_VERSION(9, 4, 1);
	}
}

static bool event_interrupt_isr_v9(struct kfd_node *dev,
					const uint32_t *ih_ring_entry,
					uint32_t *patched_ihre,
					bool *patched_flag)
{
	uint16_t source_id, client_id, pasid, vmid;
	const uint32_t *data = ih_ring_entry;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);

	/* Only handle interrupts from KFD VMIDs */
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	if (!KFD_IRQ_IS_FENCE(client_id, source_id) &&
	    (vmid < dev->vm_info.first_vmid_kfd ||
	     vmid > dev->vm_info.last_vmid_kfd))
		return false;

	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);

	/* Only handle clients we care about */
	if (client_id != SOC15_IH_CLIENTID_GRBM_CP &&
	    client_id != SOC15_IH_CLIENTID_SDMA0 &&
	    client_id != SOC15_IH_CLIENTID_SDMA1 &&
	    client_id != SOC15_IH_CLIENTID_SDMA2 &&
	    client_id != SOC15_IH_CLIENTID_SDMA3 &&
	    client_id != SOC15_IH_CLIENTID_SDMA4 &&
	    client_id != SOC15_IH_CLIENTID_SDMA5 &&
	    client_id != SOC15_IH_CLIENTID_SDMA6 &&
	    client_id != SOC15_IH_CLIENTID_SDMA7 &&
	    client_id != SOC15_IH_CLIENTID_VMC &&
	    client_id != SOC15_IH_CLIENTID_VMC1 &&
	    client_id != SOC15_IH_CLIENTID_UTCL2 &&
	    client_id != SOC15_IH_CLIENTID_SE0SH &&
	    client_id != SOC15_IH_CLIENTID_SE1SH &&
	    client_id != SOC15_IH_CLIENTID_SE2SH &&
	    client_id != SOC15_IH_CLIENTID_SE3SH &&
	    !KFD_IRQ_IS_FENCE(client_id, source_id))
		return false;

	/* This is a known issue for gfx9. Under non HWS, pasid is not set
	 * in the interrupt payload, so we need to find out the pasid on our
	 * own.
	 */
	if (!pasid && dev->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) {
		const uint32_t pasid_mask = 0xffff;

		*patched_flag = true;
		memcpy(patched_ihre, ih_ring_entry,
		       dev->kfd->device_info.ih_ring_entry_size);

		pasid = dev->dqm->vmid_pasid[vmid];

		/* Patch the pasid field */
		patched_ihre[3] = cpu_to_le32((le32_to_cpu(patched_ihre[3])
					& ~pasid_mask) | pasid);
	}

	pr_debug("client id 0x%x, source id %d, vmid %d, pasid 0x%x. raw data:\n",
		 client_id, source_id, vmid, pasid);
	pr_debug("%8X, %8X, %8X, %8X, %8X, %8X, %8X, %8X.\n",
		 data[0], data[1], data[2], data[3],
		 data[4], data[5], data[6], data[7]);

	/* If there is no valid PASID, it's likely a bug */
	if (WARN_ONCE(pasid == 0, "Bug: No PASID in KFD interrupt"))
		return false;

	/* Workaround CP firmware sending bogus signals with 0 context_id.
	 * Those can be safely ignored on hardware and firmware versions that
	 * include a valid context_id on legitimate signals. This avoids the
	 * slow path in kfd_signal_event_interrupt that scans all event slots
	 * for signaled events.
	 */
	if (source_id == SOC15_INTSRC_CP_END_OF_PIPE) {
		uint32_t context_id =
			SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);

		if (context_id == 0 && context_id_expected(dev->kfd))
			return false;
	}

	/* Interrupt types we care about: various signals and faults.
	 * They will be forwarded to a work queue (see below).
	 */
	return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
		source_id == SOC15_INTSRC_SDMA_TRAP ||
		source_id == SOC15_INTSRC_SDMA_ECC ||
		source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
		source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
		KFD_IRQ_IS_FENCE(client_id, source_id) ||
		((client_id == SOC15_IH_CLIENTID_VMC ||
		  client_id == SOC15_IH_CLIENTID_VMC1 ||
		  client_id == SOC15_IH_CLIENTID_UTCL2) &&
		 !amdgpu_no_queue_eviction_on_vm_fault);
}

static void event_interrupt_wq_v9(struct kfd_node *dev,
					const uint32_t *ih_ring_entry)
{
	uint16_t source_id, client_id, pasid, vmid;
	uint32_t context_id0, context_id1;
	uint32_t sq_intr_err, sq_int_data, encoding;

	source_id = SOC15_SOURCE_ID_FROM_IH_ENTRY(ih_ring_entry);
	client_id = SOC15_CLIENT_ID_FROM_IH_ENTRY(ih_ring_entry);
	pasid = SOC15_PASID_FROM_IH_ENTRY(ih_ring_entry);
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	context_id0 = SOC15_CONTEXT_ID0_FROM_IH_ENTRY(ih_ring_entry);
	context_id1 = SOC15_CONTEXT_ID1_FROM_IH_ENTRY(ih_ring_entry);

	if (client_id == SOC15_IH_CLIENTID_GRBM_CP ||
	    client_id == SOC15_IH_CLIENTID_SE0SH ||
	    client_id == SOC15_IH_CLIENTID_SE1SH ||
	    client_id == SOC15_IH_CLIENTID_SE2SH ||
	    client_id == SOC15_IH_CLIENTID_SE3SH) {
		if (source_id == SOC15_INTSRC_CP_END_OF_PIPE)
			kfd_signal_event_interrupt(pasid, context_id0, 32);
		else if (source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG) {
			sq_int_data = KFD_CONTEXT_ID_GET_SQ_INT_DATA(context_id0, context_id1);
			encoding = REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, ENCODING);
			switch (encoding) {
			case SQ_INTERRUPT_WORD_ENCODING_AUTO:
				pr_debug_ratelimited(
					"sq_intr: auto, se %d, ttrace %d, wlt %d, ttrac_buf_full %d, reg_tms %d, cmd_tms %d, host_cmd_ovf %d, host_reg_ovf %d, immed_ovf %d, ttrace_utc_err %d\n",
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, SE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, WLT),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_BUF_FULL),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, REG_TIMESTAMP),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, CMD_TIMESTAMP),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_CMD_OVERFLOW),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, HOST_REG_OVERFLOW),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, IMMED_OVERFLOW),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_AUTO_CTXID, THREAD_TRACE_UTC_ERROR));
				break;
			case SQ_INTERRUPT_WORD_ENCODING_INST:
				pr_debug_ratelimited("sq_intr: inst, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, intr_data 0x%x\n",
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
					sq_int_data);
				if (context_id0 & SQ_INTERRUPT_WORD_WAVE_CTXID__PRIV_MASK) {
					if (kfd_set_dbg_ev_from_interrupt(dev, pasid,
							KFD_DEBUG_DOORBELL_ID(sq_int_data),
							KFD_DEBUG_TRAP_CODE(sq_int_data),
							NULL, 0))
						return;
				}
				break;
			case SQ_INTERRUPT_WORD_ENCODING_ERROR:
				sq_intr_err = REG_GET_FIELD(sq_int_data, KFD_SQ_INT_DATA, ERR_TYPE);
				pr_warn_ratelimited("sq_intr: error, se %d, data 0x%x, sh %d, priv %d, wave_id %d, simd_id %d, cu_id %d, err_type %d\n",
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, DATA),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SH_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, PRIV),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, WAVE_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, SIMD_ID),
					REG_GET_FIELD(context_id0, SQ_INTERRUPT_WORD_WAVE_CTXID, CU_ID),
					sq_intr_err);
				if (sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_ILLEGAL_INST &&
				    sq_intr_err != SQ_INTERRUPT_ERROR_TYPE_MEMVIOL) {
					event_interrupt_poison_consumption_v9(dev, pasid, client_id);
					return;
				}
				break;
			default:
				break;
			}
			kfd_signal_event_interrupt(pasid, sq_int_data, 24);
		} else if (source_id == SOC15_INTSRC_CP_BAD_OPCODE &&
			   KFD_DBG_EC_TYPE_IS_PACKET(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0))) {
			kfd_set_dbg_ev_from_interrupt(dev, pasid,
				KFD_DEBUG_DOORBELL_ID(context_id0),
				KFD_EC_MASK(KFD_DEBUG_CP_BAD_OP_ECODE(context_id0)),
				NULL, 0);
		}
	} else if (client_id == SOC15_IH_CLIENTID_SDMA0 ||
		   client_id == SOC15_IH_CLIENTID_SDMA1 ||
		   client_id == SOC15_IH_CLIENTID_SDMA2 ||
		   client_id == SOC15_IH_CLIENTID_SDMA3 ||
		   client_id == SOC15_IH_CLIENTID_SDMA4 ||
		   client_id == SOC15_IH_CLIENTID_SDMA5 ||
		   client_id == SOC15_IH_CLIENTID_SDMA6 ||
		   client_id == SOC15_IH_CLIENTID_SDMA7) {
		if (source_id == SOC15_INTSRC_SDMA_TRAP) {
			kfd_signal_event_interrupt(pasid, context_id0 & 0xfffffff, 28);
		} else if (source_id == SOC15_INTSRC_SDMA_ECC) {
			event_interrupt_poison_consumption_v9(dev, pasid, client_id);
			return;
		}
	} else if (client_id == SOC15_IH_CLIENTID_VMC ||
		   client_id == SOC15_IH_CLIENTID_VMC1 ||
		   client_id == SOC15_IH_CLIENTID_UTCL2) {
		struct kfd_vm_fault_info info = {0};
		uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
		struct kfd_hsa_memory_exception_data exception_data;

		if (client_id == SOC15_IH_CLIENTID_UTCL2 &&
		    amdgpu_amdkfd_ras_query_utcl2_poison_status(dev->adev)) {
			event_interrupt_poison_consumption_v9(dev, pasid, client_id);
			return;
		}

		info.vmid = vmid;
		info.mc_id = client_id;
		info.page_addr = ih_ring_entry[4] |
			(uint64_t)(ih_ring_entry[5] & 0xf) << 32;
		info.prot_valid = ring_id & 0x08;
		info.prot_read = ring_id & 0x10;
		info.prot_write = ring_id & 0x20;

		memset(&exception_data, 0, sizeof(exception_data));
		exception_data.gpu_id = dev->id;
		exception_data.va = (info.page_addr) << PAGE_SHIFT;
		exception_data.failure.NotPresent = info.prot_valid ? 1 : 0;
		exception_data.failure.NoExecute = info.prot_exec ? 1 : 0;
		exception_data.failure.ReadOnly = info.prot_write ? 1 : 0;
		exception_data.failure.imprecise = 0;

		kfd_set_dbg_ev_from_interrupt(dev,
					      pasid,
					      -1,
					      KFD_EC_MASK(EC_DEVICE_MEMORY_VIOLATION),
					      &exception_data,
					      sizeof(exception_data));
		kfd_smi_event_update_vmfault(dev, pasid);
	} else if (KFD_IRQ_IS_FENCE(client_id, source_id)) {
		kfd_process_close_interrupt_drain(pasid);
	}
}

static bool event_interrupt_isr_v9_4_3(struct kfd_node *node,
					const uint32_t *ih_ring_entry,
					uint32_t *patched_ihre,
					bool *patched_flag)
{
	uint16_t node_id, vmid;

	/*
	 * For GFX 9.4.3, process the interrupt if:
	 * - NodeID field in IH entry matches the corresponding bit
	 *   set in interrupt_bitmap Bits 0-15.
	 *   OR
	 * - If partition mode is CPX and interrupt came from
	 *   Node_id 0,4,8,12, then check if the Bit (16 + client id)
	 *   is set in interrupt bitmap Bits 16-31.
	 */
	node_id = SOC15_NODEID_FROM_IH_ENTRY(ih_ring_entry);
	vmid = SOC15_VMID_FROM_IH_ENTRY(ih_ring_entry);
	if (kfd_irq_is_from_node(node, node_id, vmid))
		return event_interrupt_isr_v9(node, ih_ring_entry,
					      patched_ihre, patched_flag);
	return false;
}

const struct kfd_event_interrupt_class event_interrupt_class_v9 = {
	.interrupt_isr = event_interrupt_isr_v9,
	.interrupt_wq = event_interrupt_wq_v9,
};

const struct kfd_event_interrupt_class event_interrupt_class_v9_4_3 = {
	.interrupt_isr = event_interrupt_isr_v9_4_3,
	.interrupt_wq = event_interrupt_wq_v9,
};