// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright 2014 IBM Corp.
 */

#include <linux/spinlock.h>
#include <linux/sched.h>
#include <linux/sched/clock.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/delay.h>
#include <linux/irqdomain.h>
#include <asm/synch.h>
#include <asm/switch_to.h>
#include <misc/cxl-base.h>

#include "cxl.h"
#include "trace.h"

static int afu_control(struct cxl_afu *afu, u64 command, u64 clear,
		       u64 result, u64 mask, bool enabled)
{
	u64 AFU_Cntl;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	spin_lock(&afu->afu_cntl_lock);
	pr_devel("AFU command starting: %llx\n", command);

	trace_cxl_afu_ctrl(afu, command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	cxl_p2n_write(afu, CXL_AFU_Cntl_An, (AFU_Cntl & ~clear) | command);

	AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	while ((AFU_Cntl & mask) != result) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: AFU control timed out!\n");
			rc = -EBUSY;
			goto out;
		}

		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			afu->enabled = enabled;
			rc = -EIO;
			goto out;
		}

		pr_devel_ratelimited("AFU control... (0x%016llx)\n",
				     AFU_Cntl | command);
		cpu_relax();
		AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	}

	if (AFU_Cntl & CXL_AFU_Cntl_An_RA) {
		/*
		 * Workaround for a bug in the XSL used in the Mellanox CX4
		 * that fails to clear the RA bit after an AFU reset,
		 * preventing subsequent AFU resets from working.
		 */
		cxl_p2n_write(afu, CXL_AFU_Cntl_An, AFU_Cntl & ~CXL_AFU_Cntl_An_RA);
	}

	pr_devel("AFU command complete: %llx\n", command);
	afu->enabled = enabled;
out:
	trace_cxl_afu_ctrl_done(afu, command, rc);
	spin_unlock(&afu->afu_cntl_lock);

	return rc;
}
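
/*
 * A sketch of the afu_control() contract, for reference: callers pass the
 * command bits to set, the bits to clear, and a (result, mask) pair that
 * AFU_Cntl must satisfy before the command is considered complete.
 * afu_enable() below, for instance, sets the E bit and then polls until the
 * enable-status field reads "Enabled":
 *
 *	afu_control(afu, CXL_AFU_Cntl_An_E, 0,
 *		    CXL_AFU_Cntl_An_ES_Enabled,
 *		    CXL_AFU_Cntl_An_ES_MASK, true);
 */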

static int afu_enable(struct cxl_afu *afu)
{
	pr_devel("AFU enable request\n");

	return afu_control(afu, CXL_AFU_Cntl_An_E, 0,
			   CXL_AFU_Cntl_An_ES_Enabled,
			   CXL_AFU_Cntl_An_ES_MASK, true);
}

int cxl_afu_disable(struct cxl_afu *afu)
{
	pr_devel("AFU disable request\n");

	return afu_control(afu, 0, CXL_AFU_Cntl_An_E,
			   CXL_AFU_Cntl_An_ES_Disabled,
			   CXL_AFU_Cntl_An_ES_MASK, false);
}

/* This will disable as well as reset */
static int native_afu_reset(struct cxl_afu *afu)
{
	int rc;
	u64 serr;

	pr_devel("AFU reset request\n");

	rc = afu_control(afu, CXL_AFU_Cntl_An_RA, 0,
			 CXL_AFU_Cntl_An_RS_Complete | CXL_AFU_Cntl_An_ES_Disabled,
			 CXL_AFU_Cntl_An_RS_MASK | CXL_AFU_Cntl_An_ES_MASK,
			 false);

	/*
	 * Re-enable any masked interrupts when the AFU is not
	 * activated to avoid side effects after attaching a process
	 * in dedicated mode.
	 */
	if (afu->current_mode == 0) {
		serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
		serr &= ~CXL_PSL_SERR_An_IRQ_MASKS;
		cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	}

	return rc;
}

static int native_afu_check_and_enable(struct cxl_afu *afu)
{
	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Refusing to enable afu while link down!\n");
		return -EIO;
	}
	if (afu->enabled)
		return 0;
	return afu_enable(afu);
}

int cxl_psl_purge(struct cxl_afu *afu)
{
	u64 PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	u64 AFU_Cntl = cxl_p2n_read(afu, CXL_AFU_Cntl_An);
	u64 dsisr, dar;
	u64 start, end;
	u64 trans_fault = 0x0ULL;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_psl_ctrl(afu, CXL_PSL_SCNTL_An_Pc);

	pr_devel("PSL purge request\n");

	if (cxl_is_power8())
		trans_fault = CXL_PSL_DSISR_TRANS;
	if (cxl_is_power9())
		trans_fault = CXL_PSL9_DSISR_An_TF;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		dev_warn(&afu->dev, "PSL Purge called with link down, ignoring\n");
		rc = -EIO;
		goto out;
	}

	if ((AFU_Cntl & CXL_AFU_Cntl_An_ES_MASK) != CXL_AFU_Cntl_An_ES_Disabled) {
		WARN(1, "psl_purge request while AFU not disabled!\n");
		cxl_afu_disable(afu);
	}

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL | CXL_PSL_SCNTL_An_Pc);
	start = local_clock();
	PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	while ((PSL_CNTL & CXL_PSL_SCNTL_An_Ps_MASK)
			== CXL_PSL_SCNTL_An_Ps_Pending) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&afu->dev, "WARNING: PSL Purge timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(afu->adapter, afu)) {
			rc = -EIO;
			goto out;
		}

		dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
		pr_devel_ratelimited("PSL purging... PSL_CNTL: 0x%016llx PSL_DSISR: 0x%016llx\n",
				     PSL_CNTL, dsisr);

		if (dsisr & trans_fault) {
			dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
			dev_notice(&afu->dev, "PSL purge terminating pending translation, DSISR: 0x%016llx, DAR: 0x%016llx\n",
				   dsisr, dar);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
		} else if (dsisr) {
			dev_notice(&afu->dev, "PSL purge acknowledging pending non-translation fault, DSISR: 0x%016llx\n",
				   dsisr);
			cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);
		} else {
			cpu_relax();
		}
		PSL_CNTL = cxl_p1n_read(afu, CXL_PSL_SCNTL_An);
	}
	end = local_clock();
	pr_devel("PSL purged in %lld ns\n", end - start);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An,
		      PSL_CNTL & ~CXL_PSL_SCNTL_An_Pc);
out:
	trace_cxl_psl_ctrl_done(afu, CXL_PSL_SCNTL_An_Pc, rc);
	return rc;
}

static int spa_max_procs(int spa_size)
{
	/*
	 * From the CAIA:
	 * end_of_SPA_area = SPA_Base + ((n+4) * 128) + (( ((n*8) + 127) >> 7) * 128) + 255
	 * Most of that junk is really just an overly-complicated way of saying
	 * the last 256 bytes are __aligned(128), so it's really:
	 * end_of_SPA_area = end_of_PSL_queue_area + __aligned(128) 255
	 * and
	 * end_of_PSL_queue_area = SPA_Base + ((n+4) * 128) + (n*8) - 1
	 * so
	 * sizeof(SPA) = ((n+4) * 128) + (n*8) + __aligned(128) 256
	 * Ignore the alignment (which is safe in this case as long as we are
	 * careful with our rounding) and solve for n:
	 */
	return ((spa_size / 8) - 96) / 17;
}
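
/*
 * Worked example of the formula above, for a single 4K page:
 * n = ((4096 / 8) - 96) / 17 = (512 - 96) / 17 = 24 (integer division).
 * Checking against sizeof(SPA): ((24+4) * 128) + (24*8) + 256 = 4032 bytes,
 * which fits in 4096, whereas n = 25 would need 4168 bytes and would not.
 */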

static int cxl_alloc_spa(struct cxl_afu *afu, int mode)
{
	unsigned spa_size;

	/* Work out how many pages to allocate */
	afu->native->spa_order = -1;
	do {
		afu->native->spa_order++;
		spa_size = (1 << afu->native->spa_order) * PAGE_SIZE;

		if (spa_size > 0x100000) {
			dev_warn(&afu->dev, "num_of_processes too large for the SPA, limiting to %i (0x%x)\n",
				 afu->native->spa_max_procs, afu->native->spa_size);
			if (mode != CXL_MODE_DEDICATED)
				afu->num_procs = afu->native->spa_max_procs;
			break;
		}

		afu->native->spa_size = spa_size;
		afu->native->spa_max_procs = spa_max_procs(afu->native->spa_size);
	} while (afu->native->spa_max_procs < afu->num_procs);

	if (!(afu->native->spa = (struct cxl_process_element *)
	      __get_free_pages(GFP_KERNEL | __GFP_ZERO, afu->native->spa_order))) {
		pr_err("cxl_alloc_spa: Unable to allocate scheduled process area\n");
		return -ENOMEM;
	}
	pr_devel("spa pages: %i afu->spa_max_procs: %i afu->num_procs: %i\n",
		 1<<afu->native->spa_order, afu->native->spa_max_procs, afu->num_procs);

	return 0;
}

static void attach_spa(struct cxl_afu *afu)
{
	u64 spap;

	afu->native->sw_command_status = (__be64 *)((char *)afu->native->spa +
					 ((afu->native->spa_max_procs + 3) * 128));

	spap = virt_to_phys(afu->native->spa) & CXL_PSL_SPAP_Addr;
	spap |= ((afu->native->spa_size >> (12 - CXL_PSL_SPAP_Size_Shift)) - 1) & CXL_PSL_SPAP_Size;
	spap |= CXL_PSL_SPAP_V;
	pr_devel("cxl: SPA allocated at 0x%p. Max processes: %i, sw_command_status: 0x%p CXL_PSL_SPAP_An=0x%016llx\n",
		 afu->native->spa, afu->native->spa_max_procs,
		 afu->native->sw_command_status, spap);
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, spap);
}

void cxl_release_spa(struct cxl_afu *afu)
{
	if (afu->native->spa) {
		free_pages((unsigned long) afu->native->spa,
			   afu->native->spa_order);
		afu->native->spa = NULL;
	}
}

/*
 * Invalidation of all ERAT entries is no longer required by CAIA2. Use
 * only for debug.
 */
int cxl_invalidate_all_psl9(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	u64 ierat;

	pr_devel("CXL adapter - invalidation of all ERAT entries\n");

	/* Invalidates all ERAT entries for Radix or HPT */
	ierat = CXL_XSL9_IERAT_IALL;
	if (radix_enabled())
		ierat |= CXL_XSL9_IERAT_INVR;
	cxl_p1_write(adapter, CXL_XSL9_IERAT, ierat);

	while (cxl_p1_read(adapter, CXL_XSL9_IERAT) & CXL_XSL9_IERAT_IINPROG) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev,
				 "WARNING: CXL adapter invalidation of all ERAT entries timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_invalidate_all_psl8(struct cxl *adapter)
{
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	pr_devel("CXL adapter wide TLBIA & SLBIA\n");

	cxl_p1_write(adapter, CXL_PSL_AFUSEL, CXL_PSL_AFUSEL_A);

	cxl_p1_write(adapter, CXL_PSL_TLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_TLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide TLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}

	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_ALL);
	while (cxl_p1_read(adapter, CXL_PSL_SLBIA) & CXL_TLB_SLB_P) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: CXL adapter wide SLBIA timed out!\n");
			return -EBUSY;
		}
		if (!cxl_ops->link_ok(adapter, NULL))
			return -EIO;
		cpu_relax();
	}
	return 0;
}

int cxl_data_cache_flush(struct cxl *adapter)
{
	u64 reg;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);

	/*
	 * Do a data cache flush only if the data cache is available.
	 * The PSL9D has no data cache, so a flush operation there
	 * would simply time out.
	 */
	if (adapter->native->no_data_cache) {
		pr_devel("No PSL data cache. Ignoring cache flush req.\n");
		return 0;
	}

	pr_devel("Flushing data cache\n");
	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	reg |= CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);

	reg = cxl_p1_read(adapter, CXL_PSL_Control);
	while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n");
			return -EBUSY;
		}

		if (!cxl_ops->link_ok(adapter, NULL)) {
			dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n");
			return -EIO;
		}
		cpu_relax();
		reg = cxl_p1_read(adapter, CXL_PSL_Control);
	}

	reg &= ~CXL_PSL_Control_Fr;
	cxl_p1_write(adapter, CXL_PSL_Control, reg);
	return 0;
}

static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1)
{
	int rc;

	/* 1. Disable SSTP by writing 0 to SSTP1[V] */
	cxl_p2n_write(afu, CXL_SSTP1_An, 0);

	/* 2. Invalidate all SLB entries */
	if ((rc = cxl_afu_slbia(afu)))
		return rc;

	/* 3. Set SSTP0_An */
	cxl_p2n_write(afu, CXL_SSTP0_An, sstp0);

	/* 4. Set SSTP1_An */
	cxl_p2n_write(afu, CXL_SSTP1_An, sstp1);

	return 0;
}

/* Using the per-slice version may improve performance here (i.e. SLBIA_An) */
static void slb_invalid(struct cxl_context *ctx)
{
	struct cxl *adapter = ctx->afu->adapter;
	u64 slbia;

	WARN_ON(!mutex_is_locked(&ctx->afu->native->spa_mutex));

	cxl_p1_write(adapter, CXL_PSL_LBISEL,
		     ((u64)be32_to_cpu(ctx->elem->common.pid) << 32) |
		     be32_to_cpu(ctx->elem->lpid));
	cxl_p1_write(adapter, CXL_PSL_SLBIA, CXL_TLB_SLB_IQ_LPIDPID);

	while (1) {
		if (!cxl_ops->link_ok(adapter, NULL))
			break;
		slbia = cxl_p1_read(adapter, CXL_PSL_SLBIA);
		if (!(slbia & CXL_TLB_SLB_P))
			break;
		cpu_relax();
	}
}

static int do_process_element_cmd(struct cxl_context *ctx,
				  u64 cmd, u64 pe_state)
{
	u64 state;
	unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT);
	int rc = 0;

	trace_cxl_llcmd(ctx, cmd);

	WARN_ON(!ctx->afu->enabled);

	ctx->elem->software_state = cpu_to_be32(pe_state);
	smp_wmb();
	*(ctx->afu->native->sw_command_status) = cpu_to_be64(cmd | 0 | ctx->pe);
	smp_mb();
	cxl_p1n_write(ctx->afu, CXL_PSL_LLCMD_An, cmd | ctx->pe);
	while (1) {
		if (time_after_eq(jiffies, timeout)) {
			dev_warn(&ctx->afu->dev, "WARNING: Process Element Command timed out!\n");
			rc = -EBUSY;
			goto out;
		}
		if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
			dev_warn(&ctx->afu->dev, "WARNING: Device link down, aborting Process Element Command!\n");
			rc = -EIO;
			goto out;
		}
		state = be64_to_cpup(ctx->afu->native->sw_command_status);
		if (state == ~0ULL) {
			pr_err("cxl: Error adding process element to AFU\n");
			rc = -1;
			goto out;
		}
		if ((state & (CXL_SPA_SW_CMD_MASK | CXL_SPA_SW_STATE_MASK | CXL_SPA_SW_LINK_MASK)) ==
		    (cmd | (cmd >> 16) | ctx->pe))
			break;
		/*
		 * The command won't finish in the PSL if there are
		 * outstanding DSIs. Hence we need to yield here in
		 * case there are outstanding DSIs that we need to
		 * service. Tuning possibility: we could wait for a
		 * while before scheduling.
		 */
		schedule();
	}
out:
	trace_cxl_llcmd_done(ctx, cmd, rc);
	return rc;
}
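
/*
 * Completion check, spelled out: the PSL echoes the link-layer command back
 * into sw_command_status, with the resulting software state in the state
 * field (which, by the SPA encoding used in the comparison above, is the
 * command code shifted right by 16) and the PE handle in the link field.
 * For an ADD of PE 5 the loop above is therefore waiting for:
 *
 *	state == CXL_SPA_SW_CMD_ADD | (CXL_SPA_SW_CMD_ADD >> 16) | 5
 */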

static int add_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Adding pe: %i started\n", __func__, ctx->pe);
	if (!(rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_ADD, CXL_PE_SOFTWARE_STATE_V)))
		ctx->pe_inserted = true;
	pr_devel("%s Adding pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int terminate_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	/* fast path terminate if it's already invalid */
	if (!(ctx->elem->software_state & cpu_to_be32(CXL_PE_SOFTWARE_STATE_V)))
		return rc;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Terminate pe: %i started\n", __func__, ctx->pe);
	/* We could be asked to terminate when the hw is down. That
	 * should always succeed: it's not running if the hw has gone
	 * away and is being reset.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_TERMINATE,
					    CXL_PE_SOFTWARE_STATE_V | CXL_PE_SOFTWARE_STATE_T);
	ctx->elem->software_state = 0;	/* Remove Valid bit */
	pr_devel("%s Terminate pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);
	return rc;
}

static int remove_process_element(struct cxl_context *ctx)
{
	int rc = 0;

	mutex_lock(&ctx->afu->native->spa_mutex);
	pr_devel("%s Remove pe: %i started\n", __func__, ctx->pe);

	/* We could be asked to remove when the hw is down. Again, if
	 * the hw is down, the PE is gone, so we succeed.
	 */
	if (cxl_ops->link_ok(ctx->afu->adapter, ctx->afu))
		rc = do_process_element_cmd(ctx, CXL_SPA_SW_CMD_REMOVE, 0);

	if (!rc)
		ctx->pe_inserted = false;
	if (cxl_is_power8())
		slb_invalid(ctx);
	pr_devel("%s Remove pe: %i finished\n", __func__, ctx->pe);
	mutex_unlock(&ctx->afu->native->spa_mutex);

	return rc;
}

void cxl_assign_psn_space(struct cxl_context *ctx)
{
	if (!ctx->afu->pp_size || ctx->master) {
		ctx->psn_phys = ctx->afu->psn_phys;
		ctx->psn_size = ctx->afu->adapter->ps_size;
	} else {
		ctx->psn_phys = ctx->afu->psn_phys +
			(ctx->afu->native->pp_offset + ctx->afu->pp_size * ctx->pe);
		ctx->psn_size = ctx->afu->pp_size;
	}
}
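
/*
 * Example of the slave-context layout above, with a hypothetical 4K
 * per-process problem space: PE 0 maps at psn_phys + pp_offset, PE 1 at
 * psn_phys + pp_offset + 0x1000, PE 2 at psn_phys + pp_offset + 0x2000,
 * and so on. Master contexts instead see the adapter's entire problem
 * space.
 */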

static int activate_afu_directed(struct cxl_afu *afu)
{
	int rc;

	dev_info(&afu->dev, "Activating AFU directed mode\n");

	afu->num_procs = afu->max_procs_virtualised;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DIRECTED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_AFU);
	if (cxl_is_power8())
		cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DIRECTED;

	if ((rc = cxl_chardev_m_afu_add(afu)))
		return rc;

	if ((rc = cxl_sysfs_afu_m_add(afu)))
		goto err;

	if ((rc = cxl_chardev_s_afu_add(afu)))
		goto err1;

	return 0;
err1:
	cxl_sysfs_afu_m_remove(afu);
err:
	cxl_chardev_afu_remove(afu);
	return rc;
}

#ifdef CONFIG_CPU_LITTLE_ENDIAN
#define set_endian(sr) ((sr) |= CXL_PSL_SR_An_LE)
#else
#define set_endian(sr) ((sr) &= ~(CXL_PSL_SR_An_LE))
#endif

u64 cxl_calculate_sr(bool master, bool kernel, bool real_mode, bool p9)
{
	u64 sr = 0;

	set_endian(sr);
	if (master)
		sr |= CXL_PSL_SR_An_MP;
	if (mfspr(SPRN_LPCR) & LPCR_TC)
		sr |= CXL_PSL_SR_An_TC;

	if (kernel) {
		if (!real_mode)
			sr |= CXL_PSL_SR_An_R;
		sr |= (mfmsr() & MSR_SF) | CXL_PSL_SR_An_HV;
	} else {
		sr |= CXL_PSL_SR_An_PR | CXL_PSL_SR_An_R;
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_HV;
		else
			sr &= ~(CXL_PSL_SR_An_HV);
		if (!test_tsk_thread_flag(current, TIF_32BIT))
			sr |= CXL_PSL_SR_An_SF;
	}
	if (p9) {
		if (radix_enabled())
			sr |= CXL_PSL_SR_An_XLAT_ror;
		else
			sr |= CXL_PSL_SR_An_XLAT_hpt;
	}
	return sr;
}
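
/*
 * For reference, a typical SR for a 64-bit user-space (non-master) context
 * on a radix POWER9 system works out from the logic above to
 * PR | R | HV | SF | XLAT_ror (plus LE on little-endian kernels): problem
 * state, relocation on, and radix translation selected.
 */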

static u64 calculate_sr(struct cxl_context *ctx)
{
	return cxl_calculate_sr(ctx->master, ctx->kernel, false,
				cxl_is_power9());
}

static void update_ivtes_directed(struct cxl_context *ctx)
{
	bool need_update = (ctx->status == STARTED);
	int r;

	if (need_update) {
		WARN_ON(terminate_process_element(ctx));
		WARN_ON(remove_process_element(ctx));
	}

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}

	/*
	 * Theoretically we could use the update llcmd, instead of a
	 * terminate/remove/add (or if an atomic update was required we could
	 * do a suspend/update/resume), however it seems there might be issues
	 * with the update llcmd on some cards (including those using an XSL on
	 * an ASIC) so for now it's safest to go with the commands that are
	 * known to work. In the future if we come across a situation where the
	 * card may be performing transactions using the same PE while we are
	 * doing this update we might need to revisit this.
	 */
	if (need_update)
		WARN_ON(add_process_element(ctx));
}

static int process_element_entry_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int rc;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */

	if (ctx->kernel)
		pid = 0;
	else {
		if (ctx->mm == NULL) {
			pr_devel("%s: unable to get mm for pe=%d pid=%i\n",
				 __func__, ctx->pe, pid_nr(ctx->pid));
			return -EINVAL;
		}
		pid = ctx->mm->context.id;
	}

	/* Assign a unique TIDR (thread id) for the current thread */
	if (!(ctx->tidr) && (ctx->assign_tidr)) {
		rc = set_thread_tidr(current);
		if (rc)
			return -ENODEV;
		ctx->tidr = current->thread.tidr;
		pr_devel("%s: current tidr: %d\n", __func__, ctx->tidr);
	}

	ctx->elem->common.tid = cpu_to_be32(ctx->tidr);
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */

	cxl_prefault(ctx, wed);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	return 0;
}

int cxl_attach_afu_directed_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	update_ivtes_directed(ctx);

	/* first guy needs to enable */
	result = cxl_ops->afu_check_and_enable(ctx->afu);
	if (result)
		return result;

	return add_process_element(ctx);
}

int cxl_attach_afu_directed_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	u32 pid;
	int result;

	cxl_assign_psn_space(ctx);

	ctx->elem->ctxtime = 0; /* disable */
	ctx->elem->lpid = cpu_to_be32(mfspr(SPRN_LPID));
	ctx->elem->haurp = 0; /* disable */
	ctx->elem->u.sdr = cpu_to_be64(mfspr(SPRN_SDR1));

	pid = current->pid;
	if (ctx->kernel)
		pid = 0;
	ctx->elem->common.tid = 0;
	ctx->elem->common.pid = cpu_to_be32(pid);

	ctx->elem->sr = cpu_to_be64(calculate_sr(ctx));

	ctx->elem->common.csrp = 0; /* disable */
	ctx->elem->common.u.psl8.aurp0 = 0; /* disable */
	ctx->elem->common.u.psl8.aurp1 = 0; /* disable */

	cxl_prefault(ctx, wed);

	ctx->elem->common.u.psl8.sstp0 = cpu_to_be64(ctx->sstp0);
	ctx->elem->common.u.psl8.sstp1 = cpu_to_be64(ctx->sstp1);

	/*
	 * Ensure we have the multiplexed PSL interrupt set up to take faults
	 * for kernel contexts that may not have allocated any AFU IRQs at all:
	 */
	if (ctx->irqs.range[0] == 0) {
		ctx->irqs.offset[0] = ctx->afu->native->psl_hwirq;
		ctx->irqs.range[0] = 1;
	}

	update_ivtes_directed(ctx);

	ctx->elem->common.amr = cpu_to_be64(amr);
	ctx->elem->common.wed = cpu_to_be64(wed);

	/* first guy needs to enable */
	if ((result = cxl_ops->afu_check_and_enable(ctx->afu)))
		return result;

	return add_process_element(ctx);
}

static int deactivate_afu_directed(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating AFU directed mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_sysfs_afu_m_remove(afu);
	cxl_chardev_afu_remove(afu);

	/*
	 * The CAIA section 2.2.1 indicates that the procedure for starting and
	 * stopping an AFU in AFU directed mode is AFU specific, which is not
	 * ideal since this code is generic and with one exception has no
	 * knowledge of the AFU. This is in contrast to the procedure for
	 * disabling a dedicated process AFU, which is documented to just
	 * require a reset. The architecture does indicate that both an AFU
	 * reset and an AFU disable should result in the AFU being disabled and
	 * we do both followed by a PSL purge for safety.
	 *
	 * Notably we used to have some issues with the disable sequence on PSL
	 * cards, which is why we ended up using this heavy weight procedure in
	 * the first place, however a bug was discovered that had rendered the
	 * disable operation ineffective, so it is conceivable that was the
	 * sole explanation for those difficulties. Careful regression testing
	 * is recommended if anyone attempts to remove or reorder these
	 * operations.
	 *
	 * The XSL on the Mellanox CX4 behaves a little differently from the
	 * PSL based cards and will time out an AFU reset if the AFU is still
	 * enabled. That card is special in that we do have a means to identify
	 * it from this code, so in that case we skip the reset and just use a
	 * disable/purge to avoid the timeout and corresponding noise in the
	 * kernel log.
	 */
	if (afu->adapter->native->sl_ops->needs_reset_before_disable)
		cxl_ops->afu_reset(afu);
	cxl_afu_disable(afu);
	cxl_psl_purge(afu);

	return 0;
}

int cxl_activate_dedicated_process_psl9(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	/*
	 * If XSL is set to dedicated mode (Set in PSL_SCNTL reg), the
	 * XSL and AFU are programmed to work with a single context.
	 * The context information should be configured in the SPA area
	 * index 0 (so PSL_SPAP must be configured before enabling the
	 * AFU).
	 */
	afu->num_procs = 1;
	if (afu->native->spa == NULL) {
		if (cxl_alloc_spa(afu, CXL_MODE_DEDICATED))
			return -ENOMEM;
	}
	attach_spa(afu);

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);
	cxl_p1n_write(afu, CXL_PSL_ID_An, CXL_PSL_ID_An_F | CXL_PSL_ID_An_L);

	afu->current_mode = CXL_MODE_DEDICATED;

	return cxl_chardev_d_afu_add(afu);
}

int cxl_activate_dedicated_process_psl8(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Activating dedicated process mode\n");

	cxl_p1n_write(afu, CXL_PSL_SCNTL_An, CXL_PSL_SCNTL_An_PM_Process);

	cxl_p1n_write(afu, CXL_PSL_CtxTime_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SPAP_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_AMOR_An, 0xFFFFFFFFFFFFFFFFULL);
	cxl_p1n_write(afu, CXL_PSL_LPID_An, mfspr(SPRN_LPID));
	cxl_p1n_write(afu, CXL_HAURP_An, 0); /* disable */
	cxl_p1n_write(afu, CXL_PSL_SDR_An, mfspr(SPRN_SDR1));

	cxl_p2n_write(afu, CXL_CSRP_An, 0); /* disable */
	cxl_p2n_write(afu, CXL_AURP0_An, 0); /* disable */
	cxl_p2n_write(afu, CXL_AURP1_An, 0); /* disable */

	afu->current_mode = CXL_MODE_DEDICATED;
	afu->num_procs = 1;

	return cxl_chardev_d_afu_add(afu);
}

void cxl_update_dedicated_ivtes_psl9(struct cxl_context *ctx)
{
	int r;

	for (r = 0; r < CXL_IRQ_RANGES; r++) {
		ctx->elem->ivte_offsets[r] = cpu_to_be16(ctx->irqs.offset[r]);
		ctx->elem->ivte_ranges[r] = cpu_to_be16(ctx->irqs.range[r]);
	}
}

void cxl_update_dedicated_ivtes_psl8(struct cxl_context *ctx)
{
	struct cxl_afu *afu = ctx->afu;

	cxl_p1n_write(afu, CXL_PSL_IVTE_Offset_An,
		      (((u64)ctx->irqs.offset[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.offset[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.offset[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.offset[3] & 0xffff));
	cxl_p1n_write(afu, CXL_PSL_IVTE_Limit_An, (u64)
		      (((u64)ctx->irqs.range[0] & 0xffff) << 48) |
		      (((u64)ctx->irqs.range[1] & 0xffff) << 32) |
		      (((u64)ctx->irqs.range[2] & 0xffff) << 16) |
		      ((u64)ctx->irqs.range[3] & 0xffff));
}
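
/*
 * Packing example for the PSL8 IVTE registers above: four 16-bit fields,
 * with range 0 in the most significant halfword. With hypothetical offsets
 * {0x10, 0, 0, 0} and ranges {4, 0, 0, 1}, the writes would be
 * IVTE_Offset_An = 0x0010000000000000 and IVTE_Limit_An = 0x0004000000000001.
 */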

int cxl_attach_dedicated_process_psl9(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	int result;

	/* fill the process element entry */
	result = process_element_entry_psl9(ctx, wed, amr);
	if (result)
		return result;

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	ctx->elem->software_state = cpu_to_be32(CXL_PE_SOFTWARE_STATE_V);
	/*
	 * Ideally we should do a wmb() here to make sure the changes to the
	 * PE are visible to the card before we call afu_enable.
	 * On ppc64, though, all MMIOs are preceded by a 'sync' instruction,
	 * hence we don't need one here.
	 */

	result = cxl_ops->afu_reset(afu);
	if (result)
		return result;

	return afu_enable(afu);
}

int cxl_attach_dedicated_process_psl8(struct cxl_context *ctx, u64 wed, u64 amr)
{
	struct cxl_afu *afu = ctx->afu;
	u64 pid;
	int rc;

	pid = (u64)current->pid << 32;
	if (ctx->kernel)
		pid = 0;
	cxl_p2n_write(afu, CXL_PSL_PID_TID_An, pid);

	cxl_p1n_write(afu, CXL_PSL_SR_An, calculate_sr(ctx));

	if ((rc = cxl_write_sstp(afu, ctx->sstp0, ctx->sstp1)))
		return rc;

	cxl_prefault(ctx, wed);

	if (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes)
		afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);

	cxl_p2n_write(afu, CXL_PSL_AMR_An, amr);

	/* master only context for dedicated */
	cxl_assign_psn_space(ctx);

	if ((rc = cxl_ops->afu_reset(afu)))
		return rc;

	cxl_p2n_write(afu, CXL_PSL_WED_An, wed);

	return afu_enable(afu);
}

static int deactivate_dedicated_process(struct cxl_afu *afu)
{
	dev_info(&afu->dev, "Deactivating dedicated process mode\n");

	afu->current_mode = 0;
	afu->num_procs = 0;

	cxl_chardev_afu_remove(afu);

	return 0;
}

static int native_afu_deactivate_mode(struct cxl_afu *afu, int mode)
{
	if (mode == CXL_MODE_DIRECTED)
		return deactivate_afu_directed(afu);
	if (mode == CXL_MODE_DEDICATED)
		return deactivate_dedicated_process(afu);
	return 0;
}

static int native_afu_activate_mode(struct cxl_afu *afu, int mode)
{
	if (!mode)
		return 0;
	if (!(mode & afu->modes_supported))
		return -EINVAL;

	if (!cxl_ops->link_ok(afu->adapter, afu)) {
		WARN(1, "Device link is down, refusing to activate!\n");
		return -EIO;
	}

	if (mode == CXL_MODE_DIRECTED)
		return activate_afu_directed(afu);
	if ((mode == CXL_MODE_DEDICATED) &&
	    (afu->adapter->native->sl_ops->activate_dedicated_process))
		return afu->adapter->native->sl_ops->activate_dedicated_process(afu);

	return -EINVAL;
}

static int native_attach_process(struct cxl_context *ctx, bool kernel,
				 u64 wed, u64 amr)
{
	if (!cxl_ops->link_ok(ctx->afu->adapter, ctx->afu)) {
		WARN(1, "Device link is down, refusing to attach process!\n");
		return -EIO;
	}

	ctx->kernel = kernel;
	if ((ctx->afu->current_mode == CXL_MODE_DIRECTED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_afu_directed))
		return ctx->afu->adapter->native->sl_ops->attach_afu_directed(ctx, wed, amr);

	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->attach_dedicated_process))
		return ctx->afu->adapter->native->sl_ops->attach_dedicated_process(ctx, wed, amr);

	return -EINVAL;
}

static inline int detach_process_native_dedicated(struct cxl_context *ctx)
{
	/*
	 * The CAIA section 2.1.1 indicates that we need to do an AFU reset to
	 * stop the AFU in dedicated mode (we therefore do not make that
	 * optional like we do in the afu directed path). It does not indicate
	 * that we need to do an explicit disable (which should occur
	 * implicitly as part of the reset) or purge, but we do these as well
	 * to be on the safe side.
	 *
	 * Notably we used to have some issues with the disable sequence
	 * (before the sequence was spelled out in the architecture) which is
	 * why we were so heavy weight in the first place, however a bug was
	 * discovered that had rendered the disable operation ineffective, so
	 * it is conceivable that was the sole explanation for those
	 * difficulties. Point is, we should be careful and do some regression
	 * testing if we ever attempt to remove any part of this procedure.
	 */
	cxl_ops->afu_reset(ctx->afu);
	cxl_afu_disable(ctx->afu);
	cxl_psl_purge(ctx->afu);
	return 0;
}

static void native_update_ivtes(struct cxl_context *ctx)
{
	if (ctx->afu->current_mode == CXL_MODE_DIRECTED)
		return update_ivtes_directed(ctx);
	if ((ctx->afu->current_mode == CXL_MODE_DEDICATED) &&
	    (ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes))
		return ctx->afu->adapter->native->sl_ops->update_dedicated_ivtes(ctx);
	WARN(1, "native_update_ivtes: Bad mode\n");
}

static inline int detach_process_native_afu_directed(struct cxl_context *ctx)
{
	if (!ctx->pe_inserted)
		return 0;
	if (terminate_process_element(ctx))
		return -1;
	if (remove_process_element(ctx))
		return -1;

	return 0;
}

static int native_detach_process(struct cxl_context *ctx)
{
	trace_cxl_detach(ctx);

	if (ctx->afu->current_mode == CXL_MODE_DEDICATED)
		return detach_process_native_dedicated(ctx);

	return detach_process_native_afu_directed(ctx);
}

static int native_get_irq_info(struct cxl_afu *afu, struct cxl_irq_info *info)
{
	/* If the adapter has gone away, we can't get any meaningful
	 * information.
	 */
	if (!cxl_ops->link_ok(afu->adapter, afu))
		return -EIO;

	info->dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	info->dar = cxl_p2n_read(afu, CXL_PSL_DAR_An);
	if (cxl_is_power8())
		info->dsr = cxl_p2n_read(afu, CXL_PSL_DSR_An);
	info->afu_err = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	info->errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	info->proc_handle = 0;

	return 0;
}

void cxl_native_irq_dump_regs_psl9(struct cxl_context *ctx)
{
	u64 fir1, serr;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL9_FIR1);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
}

void cxl_native_irq_dump_regs_psl8(struct cxl_context *ctx)
{
	u64 fir1, fir2, fir_slice, serr, afu_debug;

	fir1 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(ctx->afu->adapter, CXL_PSL_FIR2);
	fir_slice = cxl_p1n_read(ctx->afu, CXL_PSL_FIR_SLICE_An);
	afu_debug = cxl_p1n_read(ctx->afu, CXL_AFU_DEBUG_An);

	dev_crit(&ctx->afu->dev, "PSL_FIR1: 0x%016llx\n", fir1);
	dev_crit(&ctx->afu->dev, "PSL_FIR2: 0x%016llx\n", fir2);
	if (ctx->afu->adapter->native->sl_ops->register_serr_irq) {
		serr = cxl_p1n_read(ctx->afu, CXL_PSL_SERR_An);
		cxl_afu_decode_psl_serr(ctx->afu, serr);
	}
	dev_crit(&ctx->afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
	dev_crit(&ctx->afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
}

static irqreturn_t native_handle_psl_slice_error(struct cxl_context *ctx,
						 u64 dsisr, u64 errstat)
{
	dev_crit(&ctx->afu->dev, "PSL ERROR STATUS: 0x%016llx\n", errstat);

	if (ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers)
		ctx->afu->adapter->native->sl_ops->psl_irq_dump_registers(ctx);

	if (ctx->afu->adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&ctx->afu->dev, "STOPPING CXL TRACE\n");
		ctx->afu->adapter->native->sl_ops->debugfs_stop_trace(ctx->afu->adapter);
	}

	return cxl_ops->ack_irq(ctx, 0, errstat);
}

static bool cxl_is_translation_fault(struct cxl_afu *afu, u64 dsisr)
{
	if ((cxl_is_power8()) && (dsisr & CXL_PSL_DSISR_TRANS))
		return true;

	if ((cxl_is_power9()) && (dsisr & CXL_PSL9_DSISR_An_TF))
		return true;

	return false;
}

irqreturn_t cxl_fail_irq_psl(struct cxl_afu *afu, struct cxl_irq_info *irq_info)
{
	if (cxl_is_translation_fault(afu, irq_info->dsisr))
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_AE);
	else
		cxl_p2n_write(afu, CXL_PSL_TFC_An, CXL_PSL_TFC_An_A);

	return IRQ_HANDLED;
}

static irqreturn_t native_irq_multiplexed(int irq, void *data)
{
	struct cxl_afu *afu = data;
	struct cxl_context *ctx;
	struct cxl_irq_info irq_info;
	u64 phreg = cxl_p2n_read(afu, CXL_PSL_PEHandle_An);
	int ph, ret = IRQ_HANDLED, res;

	/* check if eeh kicked in while the interrupt was in flight */
	if (unlikely(phreg == ~0ULL)) {
		dev_warn(&afu->dev,
			 "Ignoring slice interrupt(%d) due to fenced card",
			 irq);
		return IRQ_HANDLED;
	}
	/* Mask the pe-handle from register value */
	ph = phreg & 0xffff;
	if ((res = native_get_irq_info(afu, &irq_info))) {
		WARN(1, "Unable to get CXL IRQ Info: %i\n", res);
		if (afu->adapter->native->sl_ops->fail_irq)
			return afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
		return ret;
	}

	rcu_read_lock();
	ctx = idr_find(&afu->contexts_idr, ph);
	if (ctx) {
		if (afu->adapter->native->sl_ops->handle_interrupt)
			ret = afu->adapter->native->sl_ops->handle_interrupt(irq, ctx, &irq_info);
		rcu_read_unlock();
		return ret;
	}
	rcu_read_unlock();

	WARN(1, "Unable to demultiplex CXL PSL IRQ for PE %i DSISR %016llx DAR"
		" %016llx\n(Possible AFU HW issue - was a term/remove acked"
		" with outstanding transactions?)\n", ph, irq_info.dsisr,
		irq_info.dar);
	if (afu->adapter->native->sl_ops->fail_irq)
		ret = afu->adapter->native->sl_ops->fail_irq(afu, &irq_info);
	return ret;
}

static void native_irq_wait(struct cxl_context *ctx)
{
	u64 dsisr;
	int timeout = 1000;
	int ph;

	/*
	 * Wait until no further interrupts are presented by the PSL
	 * for this context.
	 */
	while (timeout--) {
		ph = cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) & 0xffff;
		if (ph != ctx->pe)
			return;
		dsisr = cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An);
		if (cxl_is_power8() &&
		    ((dsisr & CXL_PSL_DSISR_PENDING) == 0))
			return;
		if (cxl_is_power9() &&
		    ((dsisr & CXL_PSL9_DSISR_PENDING) == 0))
			return;
		/*
		 * We are waiting for the workqueue to process our
		 * irq, so need to let that run here.
		 */
		msleep(1);
	}

	dev_warn(&ctx->afu->dev, "WARNING: waiting on DSI for PE %i"
		 " DSISR %016llx!\n", ph, dsisr);
	return;
}

static irqreturn_t native_slice_irq_err(int irq, void *data)
{
	struct cxl_afu *afu = data;
	u64 errstat, serr, afu_error, dsisr;
	u64 fir_slice, afu_debug, irq_mask;

	/*
	 * slice err interrupt is only used with full PSL (no XSL)
	 */
	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	errstat = cxl_p2n_read(afu, CXL_PSL_ErrStat_An);
	afu_error = cxl_p2n_read(afu, CXL_AFU_ERR_An);
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_afu_decode_psl_serr(afu, serr);

	if (cxl_is_power8()) {
		fir_slice = cxl_p1n_read(afu, CXL_PSL_FIR_SLICE_An);
		afu_debug = cxl_p1n_read(afu, CXL_AFU_DEBUG_An);
		dev_crit(&afu->dev, "PSL_FIR_SLICE_An: 0x%016llx\n", fir_slice);
		dev_crit(&afu->dev, "CXL_PSL_AFU_DEBUG_An: 0x%016llx\n", afu_debug);
	}
	dev_crit(&afu->dev, "CXL_PSL_ErrStat_An: 0x%016llx\n", errstat);
	dev_crit(&afu->dev, "AFU_ERR_An: 0x%.16llx\n", afu_error);
	dev_crit(&afu->dev, "PSL_DSISR_An: 0x%.16llx\n", dsisr);

	/* mask off the IRQ so it won't retrigger until the AFU is reset */
	irq_mask = (serr & CXL_PSL_SERR_An_IRQS) >> 32;
	serr |= irq_mask;
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);
	dev_info(&afu->dev, "Further such interrupts will be masked until the AFU is reset\n");

	return IRQ_HANDLED;
}

void cxl_native_err_irq_dump_regs_psl9(struct cxl *adapter)
{
	u64 fir1;

	fir1 = cxl_p1_read(adapter, CXL_PSL9_FIR1);
	dev_crit(&adapter->dev, "PSL_FIR: 0x%016llx\n", fir1);
}

void cxl_native_err_irq_dump_regs_psl8(struct cxl *adapter)
{
	u64 fir1, fir2;

	fir1 = cxl_p1_read(adapter, CXL_PSL_FIR1);
	fir2 = cxl_p1_read(adapter, CXL_PSL_FIR2);
	dev_crit(&adapter->dev,
		 "PSL_FIR1: 0x%016llx\nPSL_FIR2: 0x%016llx\n",
		 fir1, fir2);
}

static irqreturn_t native_irq_err(int irq, void *data)
{
	struct cxl *adapter = data;
	u64 err_ivte;

	WARN(1, "CXL ERROR interrupt %i\n", irq);

	err_ivte = cxl_p1_read(adapter, CXL_PSL_ErrIVTE);
	dev_crit(&adapter->dev, "PSL_ErrIVTE: 0x%016llx\n", err_ivte);

	if (adapter->native->sl_ops->debugfs_stop_trace) {
		dev_crit(&adapter->dev, "STOPPING CXL TRACE\n");
		adapter->native->sl_ops->debugfs_stop_trace(adapter);
	}

	if (adapter->native->sl_ops->err_irq_dump_registers)
		adapter->native->sl_ops->err_irq_dump_registers(adapter);

	return IRQ_HANDLED;
}

int cxl_native_register_psl_err_irq(struct cxl *adapter)
{
	int rc;

	adapter->irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&adapter->dev));
	if (!adapter->irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(adapter, native_irq_err, adapter,
				       &adapter->native->err_hwirq,
				       &adapter->native->err_virq,
				       adapter->irq_name))) {
		kfree(adapter->irq_name);
		adapter->irq_name = NULL;
		return rc;
	}

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, adapter->native->err_hwirq & 0xffff);

	return 0;
}

void cxl_native_release_psl_err_irq(struct cxl *adapter)
{
	if (adapter->native->err_virq == 0 ||
	    adapter->native->err_virq !=
	    irq_find_mapping(NULL, adapter->native->err_hwirq))
		return;

	cxl_p1_write(adapter, CXL_PSL_ErrIVTE, 0x0000000000000000);
	cxl_unmap_irq(adapter->native->err_virq, adapter);
	cxl_ops->release_one_irq(adapter, adapter->native->err_hwirq);
	kfree(adapter->irq_name);
	adapter->native->err_virq = 0;
}

int cxl_native_register_serr_irq(struct cxl_afu *afu)
{
	u64 serr;
	int rc;

	afu->err_irq_name = kasprintf(GFP_KERNEL, "cxl-%s-err",
				      dev_name(&afu->dev));
	if (!afu->err_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_slice_irq_err, afu,
				       &afu->serr_hwirq,
				       &afu->serr_virq, afu->err_irq_name))) {
		kfree(afu->err_irq_name);
		afu->err_irq_name = NULL;
		return rc;
	}

	serr = cxl_p1n_read(afu, CXL_PSL_SERR_An);
	if (cxl_is_power8())
		serr = (serr & 0x00ffffffffff0000ULL) | (afu->serr_hwirq & 0xffff);
	if (cxl_is_power9()) {
		/*
		 * By default, all errors are masked, so don't set all the
		 * masks. Slice errors will be transferred.
		 */
		serr = (serr & ~0xff0000007fffffffULL) | (afu->serr_hwirq & 0xffff);
	}
	cxl_p1n_write(afu, CXL_PSL_SERR_An, serr);

	return 0;
}

void cxl_native_release_serr_irq(struct cxl_afu *afu)
{
	if (afu->serr_virq == 0 ||
	    afu->serr_virq != irq_find_mapping(NULL, afu->serr_hwirq))
		return;

	cxl_p1n_write(afu, CXL_PSL_SERR_An, 0x0000000000000000);
	cxl_unmap_irq(afu->serr_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->serr_hwirq);
	kfree(afu->err_irq_name);
	afu->serr_virq = 0;
}

int cxl_native_register_psl_irq(struct cxl_afu *afu)
{
	int rc;

	afu->psl_irq_name = kasprintf(GFP_KERNEL, "cxl-%s",
				      dev_name(&afu->dev));
	if (!afu->psl_irq_name)
		return -ENOMEM;

	if ((rc = cxl_register_one_irq(afu->adapter, native_irq_multiplexed,
				       afu, &afu->native->psl_hwirq, &afu->native->psl_virq,
				       afu->psl_irq_name))) {
		kfree(afu->psl_irq_name);
		afu->psl_irq_name = NULL;
	}
	return rc;
}

void cxl_native_release_psl_irq(struct cxl_afu *afu)
{
	if (afu->native->psl_virq == 0 ||
	    afu->native->psl_virq !=
	    irq_find_mapping(NULL, afu->native->psl_hwirq))
		return;

	cxl_unmap_irq(afu->native->psl_virq, afu);
	cxl_ops->release_one_irq(afu->adapter, afu->native->psl_hwirq);
	kfree(afu->psl_irq_name);
	afu->native->psl_virq = 0;
}

static void recover_psl_err(struct cxl_afu *afu, u64 errstat)
{
	u64 dsisr;

	pr_devel("RECOVERING FROM PSL ERROR... (0x%016llx)\n", errstat);

	/* Clear PSL_DSISR[PE] */
	dsisr = cxl_p2n_read(afu, CXL_PSL_DSISR_An);
	cxl_p2n_write(afu, CXL_PSL_DSISR_An, dsisr & ~CXL_PSL_DSISR_An_PE);

	/* Write 1s to clear error status bits */
	cxl_p2n_write(afu, CXL_PSL_ErrStat_An, errstat);
}

static int native_ack_irq(struct cxl_context *ctx, u64 tfc, u64 psl_reset_mask)
{
	trace_cxl_psl_irq_ack(ctx, tfc);
	if (tfc)
		cxl_p2n_write(ctx->afu, CXL_PSL_TFC_An, tfc);
	if (psl_reset_mask)
		recover_psl_err(ctx->afu, psl_reset_mask);

	return 0;
}

int cxl_check_error(struct cxl_afu *afu)
{
	return (cxl_p1n_read(afu, CXL_PSL_SCNTL_An) == ~0ULL);
}

static bool native_support_attributes(const char *attr_name,
				      enum cxl_attrs type)
{
	return true;
}

static int native_afu_cr_read64(struct cxl_afu *afu, int cr, u64 off, u64 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le64(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read32(struct cxl_afu *afu, int cr, u64 off, u32 *out)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	*out = in_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		       (cr * afu->crs_len) + off);
	return 0;
}

static int native_afu_cr_read16(struct cxl_afu *afu, int cr, u64 off, u16 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xffff;
	return rc;
}

static int native_afu_cr_read8(struct cxl_afu *afu, int cr, u64 off, u8 *out)
{
	u64 aligned_off = off & ~0x3L;
	u32 val;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val);
	if (!rc)
		*out = (val >> ((off & 0x3) * 8)) & 0xff;
	return rc;
}
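
/*
 * Sub-word read example: the config record space is little-endian, so a
 * 16-bit read at offset 6 becomes a 32-bit read at the aligned offset 4
 * followed by a shift of (6 & 0x3) * 8 = 16, extracting bytes 6 and 7 from
 * the upper half of the word.
 */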

static int native_afu_cr_write32(struct cxl_afu *afu, int cr, u64 off, u32 in)
{
	if (unlikely(!cxl_ops->link_ok(afu->adapter, afu)))
		return -EIO;
	if (unlikely(off >= afu->crs_len))
		return -ERANGE;
	out_le32(afu->native->afu_desc_mmio + afu->crs_offset +
		 (cr * afu->crs_len) + off, in);
	return 0;
}

static int native_afu_cr_write16(struct cxl_afu *afu, int cr, u64 off, u16 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	WARN_ON(shift == 24);
	mask = 0xffff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}

static int native_afu_cr_write8(struct cxl_afu *afu, int cr, u64 off, u8 in)
{
	u64 aligned_off = off & ~0x3L;
	u32 val32, mask, shift;
	int rc;

	rc = native_afu_cr_read32(afu, cr, aligned_off, &val32);
	if (rc)
		return rc;
	shift = (off & 0x3) * 8;
	mask = 0xff << shift;
	val32 = (val32 & ~mask) | (in << shift);

	rc = native_afu_cr_write32(afu, cr, aligned_off, val32);
	return rc;
}
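
/*
 * The sub-word writes above are read-modify-write on the containing 32-bit
 * word: read the aligned word, mask out the target byte(s), merge the new
 * value in, and write the word back. The WARN_ON(shift == 24) in the 16-bit
 * variant flags a write at (off & 0x3) == 3, which would straddle the
 * 32-bit boundary and silently lose the upper byte.
 */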

const struct cxl_backend_ops cxl_native_ops = {
	.module = THIS_MODULE,
	.adapter_reset = cxl_pci_reset,
	.alloc_one_irq = cxl_pci_alloc_one_irq,
	.release_one_irq = cxl_pci_release_one_irq,
	.alloc_irq_ranges = cxl_pci_alloc_irq_ranges,
	.release_irq_ranges = cxl_pci_release_irq_ranges,
	.setup_irq = cxl_pci_setup_irq,
	.handle_psl_slice_error = native_handle_psl_slice_error,
	.psl_interrupt = NULL,
	.ack_irq = native_ack_irq,
	.irq_wait = native_irq_wait,
	.attach_process = native_attach_process,
	.detach_process = native_detach_process,
	.update_ivtes = native_update_ivtes,
	.support_attributes = native_support_attributes,
	.link_ok = cxl_adapter_link_ok,
	.release_afu = cxl_pci_release_afu,
	.afu_read_err_buffer = cxl_pci_afu_read_err_buffer,
	.afu_check_and_enable = native_afu_check_and_enable,
	.afu_activate_mode = native_afu_activate_mode,
	.afu_deactivate_mode = native_afu_deactivate_mode,
	.afu_reset = native_afu_reset,
	.afu_cr_read8 = native_afu_cr_read8,
	.afu_cr_read16 = native_afu_cr_read16,
	.afu_cr_read32 = native_afu_cr_read32,
	.afu_cr_read64 = native_afu_cr_read64,
	.afu_cr_write8 = native_afu_cr_write8,
	.afu_cr_write16 = native_afu_cr_write16,
	.afu_cr_write32 = native_afu_cr_write32,
	.read_adapter_vpd = cxl_pci_read_adapter_vpd,
};