1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * Low-level SPU handling |
4 | * |
5 | * (C) Copyright IBM Deutschland Entwicklung GmbH 2005 |
6 | * |
7 | * Author: Arnd Bergmann <arndb@de.ibm.com> |
8 | */ |
9 | |
10 | #undef DEBUG |
11 | |
12 | #include <linux/interrupt.h> |
13 | #include <linux/list.h> |
14 | #include <linux/init.h> |
15 | #include <linux/ptrace.h> |
16 | #include <linux/slab.h> |
17 | #include <linux/wait.h> |
18 | #include <linux/mm.h> |
19 | #include <linux/io.h> |
20 | #include <linux/mutex.h> |
21 | #include <linux/linux_logo.h> |
22 | #include <linux/syscore_ops.h> |
23 | #include <asm/spu.h> |
24 | #include <asm/spu_priv1.h> |
25 | #include <asm/spu_csa.h> |
26 | #include <asm/kexec.h> |
27 | |
28 | const struct spu_management_ops *spu_management_ops; |
29 | EXPORT_SYMBOL_GPL(spu_management_ops); |
30 | |
31 | const struct spu_priv1_ops *spu_priv1_ops; |
32 | EXPORT_SYMBOL_GPL(spu_priv1_ops); |
33 | |
34 | struct cbe_spu_info cbe_spu_info[MAX_NUMNODES]; |
35 | EXPORT_SYMBOL_GPL(cbe_spu_info); |
36 | |
37 | /* |
38 | * The spufs fault-handling code needs to call force_sig_fault to raise signals |
39 | * on DMA errors. Export it here to avoid general kernel-wide access to this |
40 | * function |
41 | */ |
42 | EXPORT_SYMBOL_GPL(force_sig_fault); |
43 | |
44 | /* |
45 | * Protects cbe_spu_info and spu->number. |
46 | */ |
47 | static DEFINE_SPINLOCK(spu_lock); |
48 | |
49 | /* |
50 | * List of all spus in the system. |
51 | * |
52 | * This list is iterated by callers from irq context and callers that |
53 | * want to sleep. Thus modifications need to be done with both |
54 | * spu_full_list_lock and spu_full_list_mutex held, while iterating |
55 | * through it requires either of these locks. |
56 | * |
57 | * In addition spu_full_list_lock protects all assignments to |
58 | * spu->mm. |
59 | */ |
60 | static LIST_HEAD(spu_full_list); |
61 | static DEFINE_SPINLOCK(spu_full_list_lock); |
62 | static DEFINE_MUTEX(spu_full_list_mutex); |
63 | |
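/*
 * Invalidate all SLB entries of an SPU. Only done while the MFC is
 * running in virtual addressing mode (SR1[R] set); otherwise there is
 * nothing to invalidate.
 */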
64 | void spu_invalidate_slbs(struct spu *spu) |
65 | { |
66 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
67 | unsigned long flags; |
68 | |
69 | spin_lock_irqsave(&spu->register_lock, flags); |
70 | if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK) |
71 | out_be64(&priv2->slb_invalidate_all_W, 0UL); |
72 | spin_unlock_irqrestore(&spu->register_lock, flags); |
73 | } |
74 | EXPORT_SYMBOL_GPL(spu_invalidate_slbs); |
75 | |
76 | /* This is called by the MM core when a segment size is changed, to |
77 | * request a flush of all the SPEs using a given mm |
78 | */ |
79 | void spu_flush_all_slbs(struct mm_struct *mm) |
80 | { |
81 | struct spu *spu; |
82 | unsigned long flags; |
83 | |
84 | spin_lock_irqsave(&spu_full_list_lock, flags); |
85 | list_for_each_entry(spu, &spu_full_list, full_list) { |
86 | if (spu->mm == mm) |
87 | spu_invalidate_slbs(spu); |
88 | } |
89 | spin_unlock_irqrestore(&spu_full_list_lock, flags); |
90 | } |
91 | |
92 | /* The hack below stinks... try to do something better one of |
93 | * these days... Does it even work properly with NR_CPUS == 1 ? |
94 | */ |
95 | static inline void mm_needs_global_tlbie(struct mm_struct *mm) |
96 | { |
97 | int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1; |
98 | |
99 | /* Global TLBIE broadcast required with SPEs. */ |
100 | bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr); |
101 | } |
102 | |
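/*
 * Associate an mm_struct with an SPU so that subsequent SLB faults can
 * be resolved against that address space. Any mm used by an SPE needs
 * global TLB invalidations, so mark it accordingly.
 */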
103 | void spu_associate_mm(struct spu *spu, struct mm_struct *mm) |
104 | { |
105 | unsigned long flags; |
106 | |
107 | spin_lock_irqsave(&spu_full_list_lock, flags); |
108 | spu->mm = mm; |
109 | spin_unlock_irqrestore(&spu_full_list_lock, flags); |
110 | if (mm) |
111 | mm_needs_global_tlbie(mm); |
112 | } |
113 | EXPORT_SYMBOL_GPL(spu_associate_mm); |
114 | |
115 | int spu_64k_pages_available(void) |
116 | { |
117 | return mmu_psize_defs[MMU_PAGE_64K].shift != 0; |
118 | } |
119 | EXPORT_SYMBOL_GPL(spu_64k_pages_available); |
120 | |
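/*
 * Restart the MFC DMA queue after a fault has been resolved. If a
 * context switch is pending, defer the restart to the context switch
 * code by flagging the fault instead.
 */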
121 | static void spu_restart_dma(struct spu *spu) |
122 | { |
123 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
124 | |
125 | if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) |
126 | out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND); |
127 | else { |
128 | set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags); |
129 | mb(); |
130 | } |
131 | } |
132 | |
133 | static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb) |
134 | { |
135 | struct spu_priv2 __iomem *priv2 = spu->priv2; |
136 | |
137 | pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n", |
138 | __func__, slbe, slb->vsid, slb->esid); |
139 | |
140 | out_be64(&priv2->slb_index_W, slbe); |
141 | /* set invalid before writing vsid */ |
142 | out_be64(&priv2->slb_esid_RW, 0); |
143 | /* now it's safe to write the vsid */ |
144 | out_be64(&priv2->slb_vsid_RW, slb->vsid); |
145 | /* setting the new esid makes the entry valid again */ |
146 | out_be64(&priv2->slb_esid_RW, slb->esid); |
147 | } |
148 | |
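/*
 * Handle an SPE segment fault: calculate and install an SLB entry for
 * the faulting effective address, then restart the stalled DMA.
 */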
149 | static int __spu_trap_data_seg(struct spu *spu, unsigned long ea) |
150 | { |
151 | struct copro_slb slb; |
152 | int ret; |
153 | |
154 | ret = copro_calculate_slb(spu->mm, ea, &slb); |
155 | if (ret) |
156 | return ret; |
157 | |
158 | spu_load_slb(spu, spu->slb_replace, &slb); |
159 | |
160 | spu->slb_replace++; |
161 | if (spu->slb_replace >= 8) |
162 | spu->slb_replace = 0; |
163 | |
164 | spu_restart_dma(spu); |
165 | spu->stats.slb_flt++; |
166 | return 0; |
167 | } |
168 | |
169 | extern int hash_page(unsigned long ea, unsigned long access, |
170 | unsigned long trap, unsigned long dsisr); //XXX |
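/*
 * Handle an SPE mapping (data storage) fault. Kernel hash faults are
 * resolved here directly; everything else is handed to spufs via the
 * stop callback and processed in process context.
 */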
171 | static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr) |
172 | { |
173 | int ret; |
174 | |
175 | pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea); |
176 | |
177 | /* |
178 | * Handle kernel space hash faults immediately. User hash |
179 | * faults need to be deferred to process context. |
180 | */ |
181 | if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) && |
182 | (get_region_id(ea) != USER_REGION_ID)) { |
183 | |
184 | spin_unlock(&spu->register_lock); |
185 | ret = hash_page(ea, |
186 | _PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED, |
187 | 0x300, dsisr); |
188 | spin_lock(&spu->register_lock); |
189 | |
190 | if (!ret) { |
191 | spu_restart_dma(spu); |
192 | return 0; |
193 | } |
194 | } |
195 | |
196 | spu->class_1_dar = ea; |
197 | spu->class_1_dsisr = dsisr; |
198 | |
199 | spu->stop_callback(spu, 1); |
200 | |
201 | spu->class_1_dar = 0; |
202 | spu->class_1_dsisr = 0; |
203 | |
204 | return 0; |
205 | } |
206 | |
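/*
 * Build an SLB entry for a kernel address, using the page size of the
 * linear mapping or of the virtual region, as appropriate.
 */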
207 | static void __spu_kernel_slb(void *addr, struct copro_slb *slb) |
208 | { |
209 | unsigned long ea = (unsigned long)addr; |
210 | u64 llp; |
211 | |
212 | if (get_region_id(ea) == LINEAR_MAP_REGION_ID) |
213 | llp = mmu_psize_defs[mmu_linear_psize].sllp; |
214 | else |
215 | llp = mmu_psize_defs[mmu_virtual_psize].sllp; |
216 | |
217 | slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) | |
218 | SLB_VSID_KERNEL | llp; |
219 | slb->esid = (ea & ESID_MASK) | SLB_ESID_V; |
220 | } |
221 | |
222 | /** |
223 | * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the |
224 | * address @new_addr is present. |
225 | */ |
226 | static inline int __slb_present(struct copro_slb *slbs, int nr_slbs, |
227 | void *new_addr) |
228 | { |
229 | unsigned long ea = (unsigned long)new_addr; |
230 | int i; |
231 | |
232 | for (i = 0; i < nr_slbs; i++) |
233 | if (!((slbs[i].esid ^ ea) & ESID_MASK)) |
234 | return 1; |
235 | |
236 | return 0; |
237 | } |
238 | |
239 | /** |
240 | * Setup the SPU kernel SLBs, in preparation for a context save/restore. We |
241 | * need to map both the context save area, and the save/restore code. |
242 | * |
243 | * Because the lscsa and code may cross segment boundaries, we check to see |
244 | * if mappings are required for the start and end of each range. We currently |
245 | * assume that the mappings are smaller than one segment - if not, something |
246 | * is seriously wrong. |
247 | */ |
248 | void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa, |
249 | void *code, int code_size) |
250 | { |
251 | struct copro_slb slbs[4]; |
252 | int i, nr_slbs = 0; |
253 | /* start and end addresses of both mappings */ |
254 | void *addrs[] = { |
255 | lscsa, (void *)lscsa + sizeof(*lscsa) - 1, |
256 | code, code + code_size - 1 |
257 | }; |
258 | |
259 | /* check the set of addresses, and create a new entry in the slbs array |
260 | * if there isn't already a SLB for that address */ |
261 | for (i = 0; i < ARRAY_SIZE(addrs); i++) { |
262 | if (__slb_present(slbs, nr_slbs, addrs[i])) |
263 | continue; |
264 | |
265 | __spu_kernel_slb(addrs[i], &slbs[nr_slbs]); |
266 | nr_slbs++; |
267 | } |
268 | |
269 | spin_lock_irq(&spu->register_lock); |
270 | /* Add the set of SLBs */ |
271 | for (i = 0; i < nr_slbs; i++) |
272 | spu_load_slb(spu, i, &slbs[i]); |
273 | spin_unlock_irq(&spu->register_lock); |
274 | } |
275 | EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs); |
276 | |
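/*
 * Class 0 interrupt handler: latch the pending error status and fault
 * address for spufs and notify it through the stop callback.
 */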
277 | static irqreturn_t |
278 | spu_irq_class_0(int irq, void *data) |
279 | { |
280 | struct spu *spu; |
281 | unsigned long stat, mask; |
282 | |
283 | spu = data; |
284 | |
285 | spin_lock(&spu->register_lock); |
286 | mask = spu_int_mask_get(spu, 0); |
287 | stat = spu_int_stat_get(spu, 0) & mask; |
288 | |
289 | spu->class_0_pending |= stat; |
290 | spu->class_0_dar = spu_mfc_dar_get(spu); |
291 | spu->stop_callback(spu, 0); |
292 | spu->class_0_pending = 0; |
293 | spu->class_0_dar = 0; |
294 | |
295 | spu_int_stat_clear(spu, 0, stat); |
296 | spin_unlock(&spu->register_lock); |
297 | |
298 | return IRQ_HANDLED; |
299 | } |
300 | |
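/*
 * Class 1 interrupt handler: MFC translation faults (segment and
 * mapping faults) raised on behalf of the SPE.
 */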
301 | static irqreturn_t |
302 | spu_irq_class_1(int irq, void *data) |
303 | { |
304 | struct spu *spu; |
305 | unsigned long stat, mask, dar, dsisr; |
306 | |
307 | spu = data; |
308 | |
309 | /* atomically read & clear class1 status. */ |
310 | spin_lock(&spu->register_lock); |
311 | mask = spu_int_mask_get(spu, 1); |
312 | stat = spu_int_stat_get(spu, 1) & mask; |
313 | dar = spu_mfc_dar_get(spu); |
314 | dsisr = spu_mfc_dsisr_get(spu); |
315 | if (stat & CLASS1_STORAGE_FAULT_INTR) |
316 | spu_mfc_dsisr_set(spu, 0ul); |
317 | spu_int_stat_clear(spu, 1, stat); |
318 | |
319 | pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat, |
320 | dar, dsisr); |
321 | |
322 | if (stat & CLASS1_SEGMENT_FAULT_INTR) |
323 | __spu_trap_data_seg(spu, dar); |
324 | |
325 | if (stat & CLASS1_STORAGE_FAULT_INTR) |
326 | __spu_trap_data_map(spu, dar, dsisr); |
327 | |
328 | spu->class_1_dsisr = 0; |
329 | spu->class_1_dar = 0; |
330 | |
331 | spin_unlock(&spu->register_lock); |
332 | |
333 | return stat ? IRQ_HANDLED : IRQ_NONE; |
334 | } |
335 | |
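/*
 * Class 2 interrupt handler: mailbox, SPU stop/halt and DMA tag group
 * completion events, dispatched to the registered spufs callbacks.
 */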
336 | static irqreturn_t |
337 | spu_irq_class_2(int irq, void *data) |
338 | { |
339 | struct spu *spu; |
340 | unsigned long stat; |
341 | unsigned long mask; |
342 | const int mailbox_intrs = |
343 | CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR; |
344 | |
345 | spu = data; |
346 | spin_lock(&spu->register_lock); |
347 | stat = spu_int_stat_get(spu, 2); |
348 | mask = spu_int_mask_get(spu, 2); |
349 | /* ignore interrupts we're not waiting for */ |
350 | stat &= mask; |
351 | /* mailbox interrupts are level triggered. mask them now before |
352 | * acknowledging */ |
353 | if (stat & mailbox_intrs) |
354 | spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs)); |
355 | /* acknowledge all interrupts before the callbacks */ |
356 | spu_int_stat_clear(spu, 2, stat); |
357 | |
358 | pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask); |
359 | |
360 | if (stat & CLASS2_MAILBOX_INTR) |
361 | spu->ibox_callback(spu); |
362 | |
363 | if (stat & CLASS2_SPU_STOP_INTR) |
364 | spu->stop_callback(spu, 2); |
365 | |
366 | if (stat & CLASS2_SPU_HALT_INTR) |
367 | spu->stop_callback(spu, 2); |
368 | |
369 | if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR) |
370 | spu->mfc_callback(spu); |
371 | |
372 | if (stat & CLASS2_MAILBOX_THRESHOLD_INTR) |
373 | spu->wbox_callback(spu); |
374 | |
375 | spu->stats.class2_intr++; |
376 | |
377 | spin_unlock(&spu->register_lock); |
378 | |
379 | return stat ? IRQ_HANDLED : IRQ_NONE; |
380 | } |
381 | |
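/* Request the three per-class interrupt lines, unwinding on failure. */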
382 | static int __init spu_request_irqs(struct spu *spu) |
383 | { |
384 | int ret = 0; |
385 | |
386 | if (spu->irqs[0]) { |
387 | snprintf(spu->irq_c0, sizeof (spu->irq_c0), "spe%02d.0", |
388 | spu->number); |
389 | ret = request_irq(spu->irqs[0], spu_irq_class_0, |
390 | 0, spu->irq_c0, spu); |
391 | if (ret) |
392 | goto bail0; |
393 | } |
394 | if (spu->irqs[1]) { |
395 | snprintf(spu->irq_c1, sizeof (spu->irq_c1), "spe%02d.1", |
396 | spu->number); |
397 | ret = request_irq(spu->irqs[1], spu_irq_class_1, |
398 | 0, spu->irq_c1, spu); |
399 | if (ret) |
400 | goto bail1; |
401 | } |
402 | if (spu->irqs[2]) { |
403 | snprintf(spu->irq_c2, sizeof (spu->irq_c2), "spe%02d.2", |
404 | spu->number); |
405 | ret = request_irq(spu->irqs[2], spu_irq_class_2, |
406 | 0, spu->irq_c2, spu); |
407 | if (ret) |
408 | goto bail2; |
409 | } |
410 | return 0; |
411 | |
412 | bail2: |
413 | if (spu->irqs[1]) |
414 | free_irq(spu->irqs[1], spu); |
415 | bail1: |
416 | if (spu->irqs[0]) |
417 | free_irq(spu->irqs[0], spu); |
418 | bail0: |
419 | return ret; |
420 | } |
421 | |
422 | static void spu_free_irqs(struct spu *spu) |
423 | { |
424 | if (spu->irqs[0]) |
425 | free_irq(spu->irqs[0], spu); |
426 | if (spu->irqs[1]) |
427 | free_irq(spu->irqs[1], spu); |
428 | if (spu->irqs[2]) |
429 | free_irq(spu->irqs[2], spu); |
430 | } |
431 | |
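/*
 * Put the SPU channels into a known state: clear the channel data and
 * set up the initial channel counts.
 */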
432 | void spu_init_channels(struct spu *spu) |
433 | { |
434 | static const struct { |
435 | unsigned channel; |
436 | unsigned count; |
437 | } zero_list[] = { |
438 | { 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, }, |
439 | { 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, }, |
440 | }, count_list[] = { |
441 | { 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, }, |
442 | { 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, }, |
443 | { 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, }, |
444 | }; |
445 | struct spu_priv2 __iomem *priv2; |
446 | int i; |
447 | |
448 | priv2 = spu->priv2; |
449 | |
450 | /* initialize all channel data to zero */ |
451 | for (i = 0; i < ARRAY_SIZE(zero_list); i++) { |
452 | int count; |
453 | |
454 | out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel); |
455 | for (count = 0; count < zero_list[i].count; count++) |
456 | out_be64(&priv2->spu_chnldata_RW, 0); |
457 | } |
458 | |
459 | /* initialize channel counts to meaningful values */ |
460 | for (i = 0; i < ARRAY_SIZE(count_list); i++) { |
461 | out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel); |
462 | out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count); |
463 | } |
464 | } |
465 | EXPORT_SYMBOL_GPL(spu_init_channels); |
466 | |
467 | static struct bus_type spu_subsys = { |
468 | .name = "spu", |
469 | .dev_name = "spu", |
470 | }; |
471 | |
472 | int spu_add_dev_attr(struct device_attribute *attr) |
473 | { |
474 | struct spu *spu; |
475 | |
476 | mutex_lock(&spu_full_list_mutex); |
477 | list_for_each_entry(spu, &spu_full_list, full_list) |
478 | device_create_file(&spu->dev, attr); |
479 | mutex_unlock(&spu_full_list_mutex); |
480 | |
481 | return 0; |
482 | } |
483 | EXPORT_SYMBOL_GPL(spu_add_dev_attr); |
484 | |
485 | int spu_add_dev_attr_group(const struct attribute_group *attrs) |
486 | { |
487 | struct spu *spu; |
488 | int rc = 0; |
489 | |
490 | mutex_lock(&spu_full_list_mutex); |
491 | list_for_each_entry(spu, &spu_full_list, full_list) { |
492 | rc = sysfs_create_group(&spu->dev.kobj, attrs); |
493 | |
494 | /* we're in trouble here, but try unwinding anyway */ |
495 | if (rc) { |
496 | printk(KERN_ERR "%s: can't create sysfs group '%s'\n", |
497 | __func__, attrs->name); |
498 | |
499 | list_for_each_entry_continue_reverse(spu, |
500 | &spu_full_list, full_list) |
501 | sysfs_remove_group(&spu->dev.kobj, attrs); |
502 | break; |
503 | } |
504 | } |
505 | |
506 | mutex_unlock(&spu_full_list_mutex); |
507 | |
508 | return rc; |
509 | } |
510 | EXPORT_SYMBOL_GPL(spu_add_dev_attr_group); |
511 | |
512 | |
513 | void spu_remove_dev_attr(struct device_attribute *attr) |
514 | { |
515 | struct spu *spu; |
516 | |
517 | mutex_lock(&spu_full_list_mutex); |
518 | list_for_each_entry(spu, &spu_full_list, full_list) |
519 | device_remove_file(&spu->dev, attr); |
520 | mutex_unlock(&spu_full_list_mutex); |
521 | } |
522 | EXPORT_SYMBOL_GPL(spu_remove_dev_attr); |
523 | |
524 | void spu_remove_dev_attr_group(const struct attribute_group *attrs) |
525 | { |
526 | struct spu *spu; |
527 | |
528 | mutex_lock(&spu_full_list_mutex); |
529 | list_for_each_entry(spu, &spu_full_list, full_list) |
530 | sysfs_remove_group(&spu->dev.kobj, attrs); |
531 | mutex_unlock(&spu_full_list_mutex); |
532 | } |
533 | EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group); |
534 | |
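/* Register the SPU with the spu subsystem and its NUMA node in sysfs. */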
535 | static int __init spu_create_dev(struct spu *spu) |
536 | { |
537 | int ret; |
538 | |
539 | spu->dev.id = spu->number; |
540 | spu->dev.bus = &spu_subsys; |
541 | ret = device_register(&spu->dev); |
542 | if (ret) { |
543 | printk(KERN_ERR "Can't register SPU %d with sysfs\n", |
544 | spu->number); |
545 | return ret; |
546 | } |
547 | |
548 | sysfs_add_device_to_node(&spu->dev, spu->node); |
549 | |
550 | return 0; |
551 | } |
552 | |
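/*
 * Create and register a single SPU: platform-specific setup, interrupt
 * registration, sysfs device creation and addition to the global lists.
 */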
553 | static int __init create_spu(void *data) |
554 | { |
555 | struct spu *spu; |
556 | int ret; |
557 | static int number; |
558 | unsigned long flags; |
559 | |
560 | ret = -ENOMEM; |
561 | spu = kzalloc(sizeof (*spu), GFP_KERNEL); |
562 | if (!spu) |
563 | goto out; |
564 | |
565 | spu->alloc_state = SPU_FREE; |
566 | |
567 | spin_lock_init(&spu->register_lock); |
568 | spin_lock(&spu_lock); |
569 | spu->number = number++; |
570 | spin_unlock(&spu_lock); |
571 | |
572 | ret = spu_create_spu(spu, data); |
573 | |
574 | if (ret) |
575 | goto out_free; |
576 | |
577 | spu_mfc_sdr_setup(spu); |
578 | spu_mfc_sr1_set(spu, 0x33); |
579 | ret = spu_request_irqs(spu); |
580 | if (ret) |
581 | goto out_destroy; |
582 | |
583 | ret = spu_create_dev(spu); |
584 | if (ret) |
585 | goto out_free_irqs; |
586 | |
587 | mutex_lock(&cbe_spu_info[spu->node].list_mutex); |
588 | list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus); |
589 | cbe_spu_info[spu->node].n_spus++; |
590 | mutex_unlock(&cbe_spu_info[spu->node].list_mutex); |
591 | |
592 | mutex_lock(&spu_full_list_mutex); |
593 | spin_lock_irqsave(&spu_full_list_lock, flags); |
594 | list_add(&spu->full_list, &spu_full_list); |
595 | spin_unlock_irqrestore(&spu_full_list_lock, flags); |
596 | mutex_unlock(&spu_full_list_mutex); |
597 | |
598 | spu->stats.util_state = SPU_UTIL_IDLE_LOADED; |
599 | spu->stats.tstamp = ktime_get_ns(); |
600 | |
601 | INIT_LIST_HEAD(&spu->aff_list); |
602 | |
603 | goto out; |
604 | |
605 | out_free_irqs: |
606 | spu_free_irqs(spu); |
607 | out_destroy: |
608 | spu_destroy_spu(spu); |
609 | out_free: |
610 | kfree(spu); |
611 | out: |
612 | return ret; |
613 | } |
614 | |
615 | static const char *spu_state_names[] = { |
616 | "user", "system", "iowait", "idle" |
617 | }; |
618 | |
619 | static unsigned long long spu_acct_time(struct spu *spu, |
620 | enum spu_utilization_state state) |
621 | { |
622 | unsigned long long time = spu->stats.times[state]; |
623 | |
624 | /* |
625 | * If the spu is idle or the context is stopped, utilization |
626 | * statistics are not updated. Apply the time delta from the |
627 | * last recorded state of the spu. |
628 | */ |
629 | if (spu->stats.util_state == state) |
630 | time += ktime_get_ns() - spu->stats.tstamp; |
631 | |
632 | return time / NSEC_PER_MSEC; |
633 | } |
634 | |
635 | |
636 | static ssize_t spu_stat_show(struct device *dev, |
637 | struct device_attribute *attr, char *buf) |
638 | { |
639 | struct spu *spu = container_of(dev, struct spu, dev); |
640 | |
641 | return sprintf(buf, "%s %llu %llu %llu %llu " |
642 | "%llu %llu %llu %llu %llu %llu %llu %llu\n", |
643 | spu_state_names[spu->stats.util_state], |
644 | spu_acct_time(spu, SPU_UTIL_USER), |
645 | spu_acct_time(spu, SPU_UTIL_SYSTEM), |
646 | spu_acct_time(spu, SPU_UTIL_IOWAIT), |
647 | spu_acct_time(spu, SPU_UTIL_IDLE_LOADED), |
648 | spu->stats.vol_ctx_switch, |
649 | spu->stats.invol_ctx_switch, |
650 | spu->stats.slb_flt, |
651 | spu->stats.hash_flt, |
652 | spu->stats.min_flt, |
653 | spu->stats.maj_flt, |
654 | spu->stats.class2_intr, |
655 | spu->stats.libassist); |
656 | } |
657 | |
658 | static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL); |
659 | |
660 | #ifdef CONFIG_KEXEC_CORE |
661 | |
662 | struct crash_spu_info { |
663 | struct spu *spu; |
664 | u32 saved_spu_runcntl_RW; |
665 | u32 saved_spu_status_R; |
666 | u32 saved_spu_npc_RW; |
667 | u64 saved_mfc_sr1_RW; |
668 | u64 saved_mfc_dar; |
669 | u64 saved_mfc_dsisr; |
670 | }; |
671 | |
672 | #define CRASH_NUM_SPUS 16 /* Enough for current hardware */ |
673 | static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS]; |
674 | |
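/*
 * Crash shutdown handler: save the run control, status, NPC and MFC
 * fault state of each known SPU, then stop the MFC by clearing the
 * master run control bit.
 */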
675 | static void crash_kexec_stop_spus(void) |
676 | { |
677 | struct spu *spu; |
678 | int i; |
679 | u64 tmp; |
680 | |
681 | for (i = 0; i < CRASH_NUM_SPUS; i++) { |
682 | if (!crash_spu_info[i].spu) |
683 | continue; |
684 | |
685 | spu = crash_spu_info[i].spu; |
686 | |
687 | crash_spu_info[i].saved_spu_runcntl_RW = |
688 | in_be32(&spu->problem->spu_runcntl_RW); |
689 | crash_spu_info[i].saved_spu_status_R = |
690 | in_be32(&spu->problem->spu_status_R); |
691 | crash_spu_info[i].saved_spu_npc_RW = |
692 | in_be32(&spu->problem->spu_npc_RW); |
693 | |
694 | crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu); |
695 | crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu); |
696 | tmp = spu_mfc_sr1_get(spu); |
697 | crash_spu_info[i].saved_mfc_sr1_RW = tmp; |
698 | |
699 | tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK; |
700 | spu_mfc_sr1_set(spu, tmp); |
701 | |
702 | __delay(200); |
703 | } |
704 | } |
705 | |
706 | static void __init crash_register_spus(struct list_head *list) |
707 | { |
708 | struct spu *spu; |
709 | int ret; |
710 | |
711 | list_for_each_entry(spu, list, full_list) { |
712 | if (WARN_ON(spu->number >= CRASH_NUM_SPUS)) |
713 | continue; |
714 | |
715 | crash_spu_info[spu->number].spu = spu; |
716 | } |
717 | |
718 | ret = crash_shutdown_register(&crash_kexec_stop_spus); |
719 | if (ret) |
720 | printk(KERN_ERR "Could not register SPU crash handler"); |
721 | } |
722 | |
723 | #else |
724 | static inline void crash_register_spus(struct list_head *list) |
725 | { |
726 | } |
727 | #endif |
728 | |
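/* Release interrupts and tear down all SPUs at syscore shutdown time. */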
729 | static void spu_shutdown(void) |
730 | { |
731 | struct spu *spu; |
732 | |
733 | mutex_lock(&spu_full_list_mutex); |
734 | list_for_each_entry(spu, &spu_full_list, full_list) { |
735 | spu_free_irqs(spu); |
736 | spu_destroy_spu(spu); |
737 | } |
738 | mutex_unlock(&spu_full_list_mutex); |
739 | } |
740 | |
741 | static struct syscore_ops spu_syscore_ops = { |
742 | .shutdown = spu_shutdown, |
743 | }; |
744 | |
745 | static int __init init_spu_base(void) |
746 | { |
747 | int i, ret = 0; |
748 | |
749 | for (i = 0; i < MAX_NUMNODES; i++) { |
750 | mutex_init(&cbe_spu_info[i].list_mutex); |
751 | INIT_LIST_HEAD(&cbe_spu_info[i].spus); |
752 | } |
753 | |
754 | if (!spu_management_ops) |
755 | goto out; |
756 | |
757 | /* create system subsystem for spus */ |
758 | ret = subsys_system_register(&spu_subsys, NULL); |
759 | if (ret) |
760 | goto out; |
761 | |
762 | ret = spu_enumerate_spus(create_spu); |
763 | |
764 | if (ret < 0) { |
765 | printk(KERN_WARNING "%s: Error initializing spus\n", |
766 | __func__); |
767 | goto out_unregister_subsys; |
768 | } |
769 | |
770 | if (ret > 0) |
771 | fb_append_extra_logo(&logo_spe_clut224, ret); |
772 | |
773 | mutex_lock(&spu_full_list_mutex); |
774 | crash_register_spus(&spu_full_list); |
775 | mutex_unlock(&spu_full_list_mutex); |
776 | spu_add_dev_attr(&dev_attr_stat); |
777 | register_syscore_ops(&spu_syscore_ops); |
778 | |
779 | spu_init_affinity(); |
780 | |
781 | return 0; |
782 | |
783 | out_unregister_subsys: |
784 | bus_unregister(&spu_subsys); |
785 | out: |
786 | return ret; |
787 | } |
788 | device_initcall(init_spu_base); |
789 | |