// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Low-level SPU handling
 *
 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
 *
 * Author: Arnd Bergmann <arndb@de.ibm.com>
 */

#undef DEBUG

#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <linux/mutex.h>
#include <linux/linux_logo.h>
#include <linux/syscore_ops.h>
#include <asm/spu.h>
#include <asm/spu_priv1.h>
#include <asm/spu_csa.h>
#include <asm/xmon.h>
#include <asm/kexec.h>

const struct spu_management_ops *spu_management_ops;
EXPORT_SYMBOL_GPL(spu_management_ops);

const struct spu_priv1_ops *spu_priv1_ops;
EXPORT_SYMBOL_GPL(spu_priv1_ops);

struct cbe_spu_info cbe_spu_info[MAX_NUMNODES];
EXPORT_SYMBOL_GPL(cbe_spu_info);

/*
 * The spufs fault-handling code needs to call force_sig_fault to raise signals
 * on DMA errors. Export it here to avoid general kernel-wide access to this
 * function.
 */
EXPORT_SYMBOL_GPL(force_sig_fault);

/*
 * Protects cbe_spu_info and spu->number.
 */
static DEFINE_SPINLOCK(spu_lock);

/*
 * List of all spus in the system.
 *
 * This list is iterated by callers from irq context and callers that
 * want to sleep. Thus modifications need to be done with both
 * spu_full_list_lock and spu_full_list_mutex held, while iterating
 * through it requires either of these locks.
 *
 * In addition spu_full_list_lock protects all assignments to
 * spu->mm.
 */
static LIST_HEAD(spu_full_list);
static DEFINE_SPINLOCK(spu_full_list_lock);
static DEFINE_MUTEX(spu_full_list_mutex);
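/*
 * Illustrative sketch only (process_one_spu() is a hypothetical helper,
 * not part of this file): a sleeping reader takes the mutex,
 *
 *	mutex_lock(&spu_full_list_mutex);
 *	list_for_each_entry(spu, &spu_full_list, full_list)
 *		process_one_spu(spu);
 *	mutex_unlock(&spu_full_list_mutex);
 *
 * while an atomic-context reader takes spu_full_list_lock instead.
 * Writers (see create_spu()) hold both locks across list_add(), so
 * either lock alone keeps the list stable for a reader.
 */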

void spu_invalidate_slbs(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;
	unsigned long flags;

	spin_lock_irqsave(&spu->register_lock, flags);
	if (spu_mfc_sr1_get(spu) & MFC_STATE1_RELOCATE_MASK)
		out_be64(&priv2->slb_invalidate_all_W, 0UL);
	spin_unlock_irqrestore(&spu->register_lock, flags);
}
EXPORT_SYMBOL_GPL(spu_invalidate_slbs);

/* This is called by the MM core when a segment size is changed, to
 * request a flush of all the SPEs using a given mm.
 */
void spu_flush_all_slbs(struct mm_struct *mm)
{
	struct spu *spu;
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		if (spu->mm == mm)
			spu_invalidate_slbs(spu);
	}
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
}

/* The hack below stinks... try to do something better one of
 * these days... Does it even work properly with NR_CPUS == 1?
 *
 * Filling the mm cpumask makes the mm look like it is in use on every
 * possible CPU, which forces subsequent TLB invalidations for this mm
 * to be broadcast (global tlbie) rather than done locally, so the
 * SPEs' translations are invalidated as well.
 */
static inline void mm_needs_global_tlbie(struct mm_struct *mm)
{
	int nr = (NR_CPUS > 1) ? NR_CPUS : NR_CPUS + 1;

	/* Global TLBIE broadcast required with SPEs. */
	bitmap_fill(cpumask_bits(mm_cpumask(mm)), nr);
}

void spu_associate_mm(struct spu *spu, struct mm_struct *mm)
{
	unsigned long flags;

	spin_lock_irqsave(&spu_full_list_lock, flags);
	spu->mm = mm;
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	if (mm)
		mm_needs_global_tlbie(mm);
}
EXPORT_SYMBOL_GPL(spu_associate_mm);

int spu_64k_pages_available(void)
{
	return mmu_psize_defs[MMU_PAGE_64K].shift != 0;
}
EXPORT_SYMBOL_GPL(spu_64k_pages_available);

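/*
 * Resume a suspended MFC DMA transfer once its fault has been handled.
 * If a context switch is in progress, the restart is deferred: only
 * SPU_CONTEXT_FAULT_PENDING is recorded, and the context switch code
 * issues the restart when the context runs again.
 */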
static void spu_restart_dma(struct spu *spu)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags)) {
		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
	} else {
		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
		mb();
	}
}

static inline void spu_load_slb(struct spu *spu, int slbe, struct copro_slb *slb)
{
	struct spu_priv2 __iomem *priv2 = spu->priv2;

	pr_debug("%s: adding SLB[%d] 0x%016llx 0x%016llx\n",
		 __func__, slbe, slb->vsid, slb->esid);

	out_be64(&priv2->slb_index_W, slbe);
	/* set invalid before writing vsid */
	out_be64(&priv2->slb_esid_RW, 0);
	/* now it's safe to write the vsid */
	out_be64(&priv2->slb_vsid_RW, slb->vsid);
	/* setting the new esid makes the entry valid again */
	out_be64(&priv2->slb_esid_RW, slb->esid);
}

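/*
 * Handle an SPE segment miss: compute the SLB entry for the faulting
 * effective address and load it into the SPU's SLB. Entries are
 * replaced round-robin via spu->slb_replace, wrapping at the MFC's
 * eight SLB entries, then the interrupted DMA is restarted.
 */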
static int __spu_trap_data_seg(struct spu *spu, unsigned long ea)
{
	struct copro_slb slb;
	int ret;

	ret = copro_calculate_slb(spu->mm, ea, &slb);
	if (ret)
		return ret;

	spu_load_slb(spu, spu->slb_replace, &slb);

	spu->slb_replace++;
	if (spu->slb_replace >= 8)
		spu->slb_replace = 0;

	spu_restart_dma(spu);
	spu->stats.slb_flt++;
	return 0;
}

extern int hash_page(unsigned long ea, unsigned long access,
		     unsigned long trap, unsigned long dsisr); //XXX
static int __spu_trap_data_map(struct spu *spu, unsigned long ea, u64 dsisr)
{
	int ret;

	pr_debug("%s, %llx, %lx\n", __func__, dsisr, ea);

	/*
	 * Handle kernel space hash faults immediately. User hash
	 * faults need to be deferred to process context.
	 */
	if ((dsisr & MFC_DSISR_PTE_NOT_FOUND) &&
	    (get_region_id(ea) != USER_REGION_ID)) {

		spin_unlock(&spu->register_lock);
		ret = hash_page(ea,
				_PAGE_PRESENT | _PAGE_READ | _PAGE_PRIVILEGED,
				0x300, dsisr);
		spin_lock(&spu->register_lock);

		if (!ret) {
			spu_restart_dma(spu);
			return 0;
		}
	}

	spu->class_1_dar = ea;
	spu->class_1_dsisr = dsisr;

	spu->stop_callback(spu, 1);

	spu->class_1_dar = 0;
	spu->class_1_dsisr = 0;

	return 0;
}

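/*
 * Build a kernel-mode SLB entry for @addr: choose the page size (sllp)
 * encoding depending on whether the address lies in the linear mapping
 * or in the vmalloc region, and compute the VSID with get_kernel_vsid().
 */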
static void __spu_kernel_slb(void *addr, struct copro_slb *slb)
{
	unsigned long ea = (unsigned long)addr;
	u64 llp;

	if (get_region_id(ea) == LINEAR_MAP_REGION_ID)
		llp = mmu_psize_defs[mmu_linear_psize].sllp;
	else
		llp = mmu_psize_defs[mmu_virtual_psize].sllp;

	slb->vsid = (get_kernel_vsid(ea, MMU_SEGSIZE_256M) << SLB_VSID_SHIFT) |
		SLB_VSID_KERNEL | llp;
	slb->esid = (ea & ESID_MASK) | SLB_ESID_V;
}

/**
 * Given an array of @nr_slbs SLB entries, @slbs, return non-zero if the
 * address @new_addr is present.
 */
static inline int __slb_present(struct copro_slb *slbs, int nr_slbs,
		void *new_addr)
{
	unsigned long ea = (unsigned long)new_addr;
	int i;

	for (i = 0; i < nr_slbs; i++)
		if (!((slbs[i].esid ^ ea) & ESID_MASK))
			return 1;

	return 0;
}

/**
 * Setup the SPU kernel SLBs, in preparation for a context save/restore. We
 * need to map both the context save area, and the save/restore code.
 *
 * Because the lscsa and code may cross segment boundaries, we check to see
 * if mappings are required for the start and end of each range. We currently
 * assume that the mappings are smaller than one segment - if not, something
 * is seriously wrong.
 */
void spu_setup_kernel_slbs(struct spu *spu, struct spu_lscsa *lscsa,
			   void *code, int code_size)
{
	struct copro_slb slbs[4];
	int i, nr_slbs = 0;
	/* start and end addresses of both mappings */
	void *addrs[] = {
		lscsa, (void *)lscsa + sizeof(*lscsa) - 1,
		code, code + code_size - 1
	};

	/* check the set of addresses, and create a new entry in the slbs array
	 * if there isn't already an SLB for that address */
	for (i = 0; i < ARRAY_SIZE(addrs); i++) {
		if (__slb_present(slbs, nr_slbs, addrs[i]))
			continue;

		__spu_kernel_slb(addrs[i], &slbs[nr_slbs]);
		nr_slbs++;
	}

	spin_lock_irq(&spu->register_lock);
	/* Add the set of SLBs */
	for (i = 0; i < nr_slbs; i++)
		spu_load_slb(spu, i, &slbs[i]);
	spin_unlock_irq(&spu->register_lock);
}
EXPORT_SYMBOL_GPL(spu_setup_kernel_slbs);

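/*
 * Class 0 interrupt handler: error interrupts. Latch the masked status
 * and the faulting address for the owning context, notify it through
 * the stop callback, then acknowledge the interrupt.
 */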
static irqreturn_t
spu_irq_class_0(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask;

	spu = data;

	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 0);
	stat = spu_int_stat_get(spu, 0) & mask;

	spu->class_0_pending |= stat;
	spu->class_0_dar = spu_mfc_dar_get(spu);
	spu->stop_callback(spu, 0);
	spu->class_0_pending = 0;
	spu->class_0_dar = 0;

	spu_int_stat_clear(spu, 0, stat);
	spin_unlock(&spu->register_lock);

	return IRQ_HANDLED;
}

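/*
 * Class 1 interrupt handler: translation faults. The fault state is
 * read and cleared atomically under the register lock; segment misses
 * are fixed up directly via __spu_trap_data_seg() while storage (page)
 * faults are passed on to __spu_trap_data_map().
 */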
static irqreturn_t
spu_irq_class_1(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat, mask, dar, dsisr;

	spu = data;

	/* atomically read & clear class1 status. */
	spin_lock(&spu->register_lock);
	mask = spu_int_mask_get(spu, 1);
	stat = spu_int_stat_get(spu, 1) & mask;
	dar = spu_mfc_dar_get(spu);
	dsisr = spu_mfc_dsisr_get(spu);
	if (stat & CLASS1_STORAGE_FAULT_INTR)
		spu_mfc_dsisr_set(spu, 0ul);
	spu_int_stat_clear(spu, 1, stat);

	pr_debug("%s: %lx %lx %lx %lx\n", __func__, mask, stat,
		 dar, dsisr);

	if (stat & CLASS1_SEGMENT_FAULT_INTR)
		__spu_trap_data_seg(spu, dar);

	if (stat & CLASS1_STORAGE_FAULT_INTR)
		__spu_trap_data_map(spu, dar, dsisr);

	spu->class_1_dsisr = 0;
	spu->class_1_dar = 0;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

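/*
 * Class 2 interrupt handler: application interrupts (mailboxes, SPU
 * stop and halt, DMA tag-group completion). The mailbox sources are
 * level triggered, so they are masked before the status is
 * acknowledged and the spufs callbacks are invoked.
 */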
static irqreturn_t
spu_irq_class_2(int irq, void *data)
{
	struct spu *spu;
	unsigned long stat;
	unsigned long mask;
	const int mailbox_intrs =
		CLASS2_MAILBOX_THRESHOLD_INTR | CLASS2_MAILBOX_INTR;

	spu = data;
	spin_lock(&spu->register_lock);
	stat = spu_int_stat_get(spu, 2);
	mask = spu_int_mask_get(spu, 2);
	/* ignore interrupts we're not waiting for */
	stat &= mask;
	/* mailbox interrupts are level triggered. mask them now before
	 * acknowledging */
	if (stat & mailbox_intrs)
		spu_int_mask_and(spu, 2, ~(stat & mailbox_intrs));
	/* acknowledge all interrupts before the callbacks */
	spu_int_stat_clear(spu, 2, stat);

	pr_debug("class 2 interrupt %d, %lx, %lx\n", irq, stat, mask);

	if (stat & CLASS2_MAILBOX_INTR)
		spu->ibox_callback(spu);

	if (stat & CLASS2_SPU_STOP_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_HALT_INTR)
		spu->stop_callback(spu, 2);

	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
		spu->mfc_callback(spu);

	if (stat & CLASS2_MAILBOX_THRESHOLD_INTR)
		spu->wbox_callback(spu);

	spu->stats.class2_intr++;

	spin_unlock(&spu->register_lock);

	return stat ? IRQ_HANDLED : IRQ_NONE;
}

static int __init spu_request_irqs(struct spu *spu)
{
	int ret = 0;

	if (spu->irqs[0]) {
		snprintf(spu->irq_c0, sizeof(spu->irq_c0), "spe%02d.0",
			 spu->number);
		ret = request_irq(spu->irqs[0], spu_irq_class_0,
				  0, spu->irq_c0, spu);
		if (ret)
			goto bail0;
	}
	if (spu->irqs[1]) {
		snprintf(spu->irq_c1, sizeof(spu->irq_c1), "spe%02d.1",
			 spu->number);
		ret = request_irq(spu->irqs[1], spu_irq_class_1,
				  0, spu->irq_c1, spu);
		if (ret)
			goto bail1;
	}
	if (spu->irqs[2]) {
		snprintf(spu->irq_c2, sizeof(spu->irq_c2), "spe%02d.2",
			 spu->number);
		ret = request_irq(spu->irqs[2], spu_irq_class_2,
				  0, spu->irq_c2, spu);
		if (ret)
			goto bail2;
	}
	return 0;

bail2:
	if (spu->irqs[1])
		free_irq(spu->irqs[1], spu);
bail1:
	if (spu->irqs[0])
		free_irq(spu->irqs[0], spu);
bail0:
	return ret;
}

static void spu_free_irqs(struct spu *spu)
{
	if (spu->irqs[0])
		free_irq(spu->irqs[0], spu);
	if (spu->irqs[1])
		free_irq(spu->irqs[1], spu);
	if (spu->irqs[2])
		free_irq(spu->irqs[2], spu);
}

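/*
 * Bring the SPU's channels into a clean initial state: zero_list clears
 * stale channel data, and count_list programs each channel's initial
 * count (e.g. 16 for the MFC command queue channel 0x15, one for each
 * outbound mailbox channel).
 */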
void spu_init_channels(struct spu *spu)
{
	static const struct {
		unsigned channel;
		unsigned count;
	} zero_list[] = {
		{ 0x00, 1, }, { 0x01, 1, }, { 0x03, 1, }, { 0x04, 1, },
		{ 0x18, 1, }, { 0x19, 1, }, { 0x1b, 1, }, { 0x1d, 1, },
	}, count_list[] = {
		{ 0x00, 0, }, { 0x03, 0, }, { 0x04, 0, }, { 0x15, 16, },
		{ 0x17, 1, }, { 0x18, 0, }, { 0x19, 0, }, { 0x1b, 0, },
		{ 0x1c, 1, }, { 0x1d, 0, }, { 0x1e, 1, },
	};
	struct spu_priv2 __iomem *priv2;
	int i;

	priv2 = spu->priv2;

	/* initialize all channel data to zero */
	for (i = 0; i < ARRAY_SIZE(zero_list); i++) {
		int count;

		out_be64(&priv2->spu_chnlcntptr_RW, zero_list[i].channel);
		for (count = 0; count < zero_list[i].count; count++)
			out_be64(&priv2->spu_chnldata_RW, 0);
	}

	/* initialize channel counts to meaningful values */
	for (i = 0; i < ARRAY_SIZE(count_list); i++) {
		out_be64(&priv2->spu_chnlcntptr_RW, count_list[i].channel);
		out_be64(&priv2->spu_chnlcnt_RW, count_list[i].count);
	}
}
EXPORT_SYMBOL_GPL(spu_init_channels);

static struct bus_type spu_subsys = {
	.name = "spu",
	.dev_name = "spu",
};

int spu_add_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_create_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);

	return 0;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr);

int spu_add_dev_attr_group(const struct attribute_group *attrs)
{
	struct spu *spu;
	int rc = 0;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		rc = sysfs_create_group(&spu->dev.kobj, attrs);

		/* we're in trouble here, but try unwinding anyway */
		if (rc) {
			printk(KERN_ERR "%s: can't create sysfs group '%s'\n",
			       __func__, attrs->name);

			list_for_each_entry_continue_reverse(spu,
					&spu_full_list, full_list)
				sysfs_remove_group(&spu->dev.kobj, attrs);
			break;
		}
	}

	mutex_unlock(&spu_full_list_mutex);

	return rc;
}
EXPORT_SYMBOL_GPL(spu_add_dev_attr_group);


void spu_remove_dev_attr(struct device_attribute *attr)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		device_remove_file(&spu->dev, attr);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr);

void spu_remove_dev_attr_group(const struct attribute_group *attrs)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list)
		sysfs_remove_group(&spu->dev.kobj, attrs);
	mutex_unlock(&spu_full_list_mutex);
}
EXPORT_SYMBOL_GPL(spu_remove_dev_attr_group);

static int __init spu_create_dev(struct spu *spu)
{
	int ret;

	spu->dev.id = spu->number;
	spu->dev.bus = &spu_subsys;
	ret = device_register(&spu->dev);
	if (ret) {
		printk(KERN_ERR "Can't register SPU %d with sysfs\n",
		       spu->number);
		return ret;
	}

	sysfs_add_device_to_node(&spu->dev, spu->node);

	return 0;
}

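/*
 * Bring up one physical SPU: allocate and number it, have the platform
 * code create it, program the MFC, request the three interrupt classes,
 * register the sysfs device and publish the SPU on the per-node and
 * global lists.
 */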
static int __init create_spu(void *data)
{
	struct spu *spu;
	int ret;
	static int number;
	unsigned long flags;

	ret = -ENOMEM;
	spu = kzalloc(sizeof(*spu), GFP_KERNEL);
	if (!spu)
		goto out;

	spu->alloc_state = SPU_FREE;

	spin_lock_init(&spu->register_lock);
	spin_lock(&spu_lock);
	spu->number = number++;
	spin_unlock(&spu_lock);

	ret = spu_create_spu(spu, data);

	if (ret)
		goto out_free;

	spu_mfc_sdr_setup(spu);
	spu_mfc_sr1_set(spu, 0x33);
	ret = spu_request_irqs(spu);
	if (ret)
		goto out_destroy;

	ret = spu_create_dev(spu);
	if (ret)
		goto out_free_irqs;

	mutex_lock(&cbe_spu_info[spu->node].list_mutex);
	list_add(&spu->cbe_list, &cbe_spu_info[spu->node].spus);
	cbe_spu_info[spu->node].n_spus++;
	mutex_unlock(&cbe_spu_info[spu->node].list_mutex);

	mutex_lock(&spu_full_list_mutex);
	spin_lock_irqsave(&spu_full_list_lock, flags);
	list_add(&spu->full_list, &spu_full_list);
	spin_unlock_irqrestore(&spu_full_list_lock, flags);
	mutex_unlock(&spu_full_list_mutex);

	spu->stats.util_state = SPU_UTIL_IDLE_LOADED;
	spu->stats.tstamp = ktime_get_ns();

	INIT_LIST_HEAD(&spu->aff_list);

	goto out;

out_free_irqs:
	spu_free_irqs(spu);
out_destroy:
	spu_destroy_spu(spu);
out_free:
	kfree(spu);
out:
	return ret;
}

static const char *spu_state_names[] = {
	"user", "system", "iowait", "idle"
};

static unsigned long long spu_acct_time(struct spu *spu,
					enum spu_utilization_state state)
{
	unsigned long long time = spu->stats.times[state];

	/*
	 * If the spu is idle or the context is stopped, utilization
	 * statistics are not updated. Apply the time delta from the
	 * last recorded state of the spu.
	 */
	if (spu->stats.util_state == state)
		time += ktime_get_ns() - spu->stats.tstamp;

	return time / NSEC_PER_MSEC;
}

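/*
 * Per-SPU sysfs "stat" attribute: a single line giving the current
 * utilization state, the accumulated user/system/iowait/idle times in
 * milliseconds, and then the context-switch, fault, class 2 interrupt
 * and library-assist counters, in the order listed below.
 */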
static ssize_t spu_stat_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct spu *spu = container_of(dev, struct spu, dev);

	return sprintf(buf, "%s %llu %llu %llu %llu "
		"%llu %llu %llu %llu %llu %llu %llu %llu\n",
		spu_state_names[spu->stats.util_state],
		spu_acct_time(spu, SPU_UTIL_USER),
		spu_acct_time(spu, SPU_UTIL_SYSTEM),
		spu_acct_time(spu, SPU_UTIL_IOWAIT),
		spu_acct_time(spu, SPU_UTIL_IDLE_LOADED),
		spu->stats.vol_ctx_switch,
		spu->stats.invol_ctx_switch,
		spu->stats.slb_flt,
		spu->stats.hash_flt,
		spu->stats.min_flt,
		spu->stats.maj_flt,
		spu->stats.class2_intr,
		spu->stats.libassist);
}

static DEVICE_ATTR(stat, 0444, spu_stat_show, NULL);

#ifdef CONFIG_KEXEC_CORE

struct crash_spu_info {
	struct spu *spu;
	u32 saved_spu_runcntl_RW;
	u32 saved_spu_status_R;
	u32 saved_spu_npc_RW;
	u64 saved_mfc_sr1_RW;
	u64 saved_mfc_dar;
	u64 saved_mfc_dsisr;
};

#define CRASH_NUM_SPUS	16	/* Enough for current hardware */
static struct crash_spu_info crash_spu_info[CRASH_NUM_SPUS];

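/*
 * Crash-shutdown hook: save each SPU's run control, status, NPC and MFC
 * fault state for the crash dump, then stop the SPU by clearing the
 * master run control bit in MFC SR1, with a short delay to let each
 * SPU settle.
 */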
static void crash_kexec_stop_spus(void)
{
	struct spu *spu;
	int i;
	u64 tmp;

	for (i = 0; i < CRASH_NUM_SPUS; i++) {
		if (!crash_spu_info[i].spu)
			continue;

		spu = crash_spu_info[i].spu;

		crash_spu_info[i].saved_spu_runcntl_RW =
			in_be32(&spu->problem->spu_runcntl_RW);
		crash_spu_info[i].saved_spu_status_R =
			in_be32(&spu->problem->spu_status_R);
		crash_spu_info[i].saved_spu_npc_RW =
			in_be32(&spu->problem->spu_npc_RW);

		crash_spu_info[i].saved_mfc_dar = spu_mfc_dar_get(spu);
		crash_spu_info[i].saved_mfc_dsisr = spu_mfc_dsisr_get(spu);
		tmp = spu_mfc_sr1_get(spu);
		crash_spu_info[i].saved_mfc_sr1_RW = tmp;

		tmp &= ~MFC_STATE1_MASTER_RUN_CONTROL_MASK;
		spu_mfc_sr1_set(spu, tmp);

		__delay(200);
	}
}

static void __init crash_register_spus(struct list_head *list)
{
	struct spu *spu;
	int ret;

	list_for_each_entry(spu, list, full_list) {
		if (WARN_ON(spu->number >= CRASH_NUM_SPUS))
			continue;

		crash_spu_info[spu->number].spu = spu;
	}

	ret = crash_shutdown_register(&crash_kexec_stop_spus);
	if (ret)
		printk(KERN_ERR "Could not register SPU crash handler");
}

#else
static inline void crash_register_spus(struct list_head *list)
{
}
#endif

static void spu_shutdown(void)
{
	struct spu *spu;

	mutex_lock(&spu_full_list_mutex);
	list_for_each_entry(spu, &spu_full_list, full_list) {
		spu_free_irqs(spu);
		spu_destroy_spu(spu);
	}
	mutex_unlock(&spu_full_list_mutex);
}

static struct syscore_ops spu_syscore_ops = {
	.shutdown = spu_shutdown,
};

static int __init init_spu_base(void)
{
	int i, ret = 0;

	for (i = 0; i < MAX_NUMNODES; i++) {
		mutex_init(&cbe_spu_info[i].list_mutex);
		INIT_LIST_HEAD(&cbe_spu_info[i].spus);
	}

	if (!spu_management_ops)
		goto out;

	/* create system subsystem for spus */
	ret = subsys_system_register(&spu_subsys, NULL);
	if (ret)
		goto out;

	ret = spu_enumerate_spus(create_spu);

	if (ret < 0) {
		printk(KERN_WARNING "%s: Error initializing spus\n",
			__func__);
		goto out_unregister_subsys;
	}

	if (ret > 0)
		fb_append_extra_logo(&logo_spe_clut224, ret);

	mutex_lock(&spu_full_list_mutex);
	xmon_register_spus(&spu_full_list);
	crash_register_spus(&spu_full_list);
	mutex_unlock(&spu_full_list_mutex);
	spu_add_dev_attr(&dev_attr_stat);
	register_syscore_ops(&spu_syscore_ops);

	spu_init_affinity();

	return 0;

out_unregister_subsys:
	bus_unregister(&spu_subsys);
out:
	return ret;
}
device_initcall(init_spu_base);
