1// SPDX-License-Identifier: GPL-2.0-only
2/*
3 * spu management operations for of based platforms
4 *
5 * (C) Copyright IBM Deutschland Entwicklung GmbH 2005
6 * Copyright 2006 Sony Corp.
7 * (C) Copyright 2007 TOSHIBA CORPORATION
8 */
9
10#include <linux/interrupt.h>
11#include <linux/list.h>
12#include <linux/export.h>
13#include <linux/ptrace.h>
14#include <linux/wait.h>
15#include <linux/mm.h>
16#include <linux/io.h>
17#include <linux/mutex.h>
18#include <linux/device.h>
19#include <linux/of_address.h>
20#include <linux/of_irq.h>
21
22#include <asm/spu.h>
23#include <asm/spu_priv1.h>
24#include <asm/firmware.h>
25
26#include "spufs/spufs.h"
27#include "interrupt.h"
28#include "spu_priv1_mmio.h"
29
30struct device_node *spu_devnode(struct spu *spu)
31{
32 return spu->devnode;
33}
34
35EXPORT_SYMBOL_GPL(spu_devnode);
36
37static u64 __init find_spu_unit_number(struct device_node *spe)
38{
39 const unsigned int *prop;
40 int proplen;
41
42 /* new device trees should provide the physical-id attribute */
43 prop = of_get_property(node: spe, name: "physical-id", lenp: &proplen);
44 if (proplen == 4)
45 return (u64)*prop;
46
47 /* celleb device tree provides the unit-id */
48 prop = of_get_property(node: spe, name: "unit-id", lenp: &proplen);
49 if (proplen == 4)
50 return (u64)*prop;
51
52 /* legacy device trees provide the id in the reg attribute */
53 prop = of_get_property(node: spe, name: "reg", lenp: &proplen);
54 if (proplen == 4)
55 return (u64)*prop;
56
57 return 0;
58}
59
60static void spu_unmap(struct spu *spu)
61{
62 if (!firmware_has_feature(FW_FEATURE_LPAR))
63 iounmap(addr: spu->priv1);
64 iounmap(addr: spu->priv2);
65 iounmap(addr: spu->problem);
66 iounmap(addr: (__force u8 __iomem *)spu->local_store);
67}
68
69static int __init spu_map_interrupts_old(struct spu *spu,
70 struct device_node *np)
71{
72 unsigned int isrc;
73 const u32 *tmp;
74 int nid;
75
76 /* Get the interrupt source unit from the device-tree */
77 tmp = of_get_property(node: np, name: "isrc", NULL);
78 if (!tmp)
79 return -ENODEV;
80 isrc = tmp[0];
81
82 tmp = of_get_property(node: np->parent->parent, name: "node-id", NULL);
83 if (!tmp) {
84 printk(KERN_WARNING "%s: can't find node-id\n", __func__);
85 nid = spu->node;
86 } else
87 nid = tmp[0];
88
89 /* Add the node number */
90 isrc |= nid << IIC_IRQ_NODE_SHIFT;
91
92 /* Now map interrupts of all 3 classes */
93 spu->irqs[0] = irq_create_mapping(NULL, hwirq: IIC_IRQ_CLASS_0 | isrc);
94 spu->irqs[1] = irq_create_mapping(NULL, hwirq: IIC_IRQ_CLASS_1 | isrc);
95 spu->irqs[2] = irq_create_mapping(NULL, hwirq: IIC_IRQ_CLASS_2 | isrc);
96
97 /* Right now, we only fail if class 2 failed */
98 if (!spu->irqs[2])
99 return -EINVAL;
100
101 return 0;
102}
103
104static void __iomem * __init spu_map_prop_old(struct spu *spu,
105 struct device_node *n,
106 const char *name)
107{
108 const struct address_prop {
109 unsigned long address;
110 unsigned int len;
111 } __attribute__((packed)) *prop;
112 int proplen;
113
114 prop = of_get_property(node: n, name, lenp: &proplen);
115 if (prop == NULL || proplen != sizeof (struct address_prop))
116 return NULL;
117
118 return ioremap(offset: prop->address, size: prop->len);
119}
120
121static int __init spu_map_device_old(struct spu *spu)
122{
123 struct device_node *node = spu->devnode;
124 const char *prop;
125 int ret;
126
127 ret = -ENODEV;
128 spu->name = of_get_property(node, name: "name", NULL);
129 if (!spu->name)
130 goto out;
131
132 prop = of_get_property(node, name: "local-store", NULL);
133 if (!prop)
134 goto out;
135 spu->local_store_phys = *(unsigned long *)prop;
136
137 /* we use local store as ram, not io memory */
138 spu->local_store = (void __force *)
139 spu_map_prop_old(spu, n: node, name: "local-store");
140 if (!spu->local_store)
141 goto out;
142
143 prop = of_get_property(node, name: "problem", NULL);
144 if (!prop)
145 goto out_unmap;
146 spu->problem_phys = *(unsigned long *)prop;
147
148 spu->problem = spu_map_prop_old(spu, n: node, name: "problem");
149 if (!spu->problem)
150 goto out_unmap;
151
152 spu->priv2 = spu_map_prop_old(spu, n: node, name: "priv2");
153 if (!spu->priv2)
154 goto out_unmap;
155
156 if (!firmware_has_feature(FW_FEATURE_LPAR)) {
157 spu->priv1 = spu_map_prop_old(spu, n: node, name: "priv1");
158 if (!spu->priv1)
159 goto out_unmap;
160 }
161
162 ret = 0;
163 goto out;
164
165out_unmap:
166 spu_unmap(spu);
167out:
168 return ret;
169}
170
171static int __init spu_map_interrupts(struct spu *spu, struct device_node *np)
172{
173 int i;
174
175 for (i=0; i < 3; i++) {
176 spu->irqs[i] = irq_of_parse_and_map(node: np, index: i);
177 if (!spu->irqs[i])
178 goto err;
179 }
180 return 0;
181
182err:
183 pr_debug("failed to map irq %x for spu %s\n", i, spu->name);
184 for (; i >= 0; i--) {
185 if (spu->irqs[i])
186 irq_dispose_mapping(virq: spu->irqs[i]);
187 }
188 return -EINVAL;
189}
190
191static int __init spu_map_resource(struct spu *spu, int nr,
192 void __iomem** virt, unsigned long *phys)
193{
194 struct device_node *np = spu->devnode;
195 struct resource resource = { };
196 unsigned long len;
197 int ret;
198
199 ret = of_address_to_resource(dev: np, index: nr, r: &resource);
200 if (ret)
201 return ret;
202 if (phys)
203 *phys = resource.start;
204 len = resource_size(res: &resource);
205 *virt = ioremap(offset: resource.start, size: len);
206 if (!*virt)
207 return -EINVAL;
208 return 0;
209}
210
211static int __init spu_map_device(struct spu *spu)
212{
213 struct device_node *np = spu->devnode;
214 int ret = -ENODEV;
215
216 spu->name = of_get_property(node: np, name: "name", NULL);
217 if (!spu->name)
218 goto out;
219
220 ret = spu_map_resource(spu, nr: 0, virt: (void __iomem**)&spu->local_store,
221 phys: &spu->local_store_phys);
222 if (ret) {
223 pr_debug("spu_new: failed to map %pOF resource 0\n",
224 np);
225 goto out;
226 }
227 ret = spu_map_resource(spu, nr: 1, virt: (void __iomem**)&spu->problem,
228 phys: &spu->problem_phys);
229 if (ret) {
230 pr_debug("spu_new: failed to map %pOF resource 1\n",
231 np);
232 goto out_unmap;
233 }
234 ret = spu_map_resource(spu, nr: 2, virt: (void __iomem**)&spu->priv2, NULL);
235 if (ret) {
236 pr_debug("spu_new: failed to map %pOF resource 2\n",
237 np);
238 goto out_unmap;
239 }
240 if (!firmware_has_feature(FW_FEATURE_LPAR))
241 ret = spu_map_resource(spu, nr: 3,
242 virt: (void __iomem**)&spu->priv1, NULL);
243 if (ret) {
244 pr_debug("spu_new: failed to map %pOF resource 3\n",
245 np);
246 goto out_unmap;
247 }
248 pr_debug("spu_new: %pOF maps:\n", np);
249 pr_debug(" local store : 0x%016lx -> 0x%p\n",
250 spu->local_store_phys, spu->local_store);
251 pr_debug(" problem state : 0x%016lx -> 0x%p\n",
252 spu->problem_phys, spu->problem);
253 pr_debug(" priv2 : 0x%p\n", spu->priv2);
254 pr_debug(" priv1 : 0x%p\n", spu->priv1);
255
256 return 0;
257
258out_unmap:
259 spu_unmap(spu);
260out:
261 pr_debug("failed to map spe %s: %d\n", spu->name, ret);
262 return ret;
263}
264
265static int __init of_enumerate_spus(int (*fn)(void *data))
266{
267 int ret;
268 struct device_node *node;
269 unsigned int n = 0;
270
271 ret = -ENODEV;
272 for_each_node_by_type(node, "spe") {
273 ret = fn(node);
274 if (ret) {
275 printk(KERN_WARNING "%s: Error initializing %pOFn\n",
276 __func__, node);
277 of_node_put(node);
278 break;
279 }
280 n++;
281 }
282 return ret ? ret : n;
283}
284
285static int __init of_create_spu(struct spu *spu, void *data)
286{
287 int ret;
288 struct device_node *spe = (struct device_node *)data;
289 static int legacy_map = 0, legacy_irq = 0;
290
291 spu->devnode = of_node_get(node: spe);
292 spu->spe_id = find_spu_unit_number(spe);
293
294 spu->node = of_node_to_nid(np: spe);
295 if (spu->node >= MAX_NUMNODES) {
296 printk(KERN_WARNING "SPE %pOF on node %d ignored,"
297 " node number too big\n", spe, spu->node);
298 printk(KERN_WARNING "Check if CONFIG_NUMA is enabled.\n");
299 ret = -ENODEV;
300 goto out;
301 }
302
303 ret = spu_map_device(spu);
304 if (ret) {
305 if (!legacy_map) {
306 legacy_map = 1;
307 printk(KERN_WARNING "%s: Legacy device tree found, "
308 "trying to map old style\n", __func__);
309 }
310 ret = spu_map_device_old(spu);
311 if (ret) {
312 printk(KERN_ERR "Unable to map %s\n",
313 spu->name);
314 goto out;
315 }
316 }
317
318 ret = spu_map_interrupts(spu, np: spe);
319 if (ret) {
320 if (!legacy_irq) {
321 legacy_irq = 1;
322 printk(KERN_WARNING "%s: Legacy device tree found, "
323 "trying old style irq\n", __func__);
324 }
325 ret = spu_map_interrupts_old(spu, np: spe);
326 if (ret) {
327 printk(KERN_ERR "%s: could not map interrupts\n",
328 spu->name);
329 goto out_unmap;
330 }
331 }
332
333 pr_debug("Using SPE %s %p %p %p %p %d\n", spu->name,
334 spu->local_store, spu->problem, spu->priv1,
335 spu->priv2, spu->number);
336 goto out;
337
338out_unmap:
339 spu_unmap(spu);
340out:
341 return ret;
342}
343
344static int of_destroy_spu(struct spu *spu)
345{
346 spu_unmap(spu);
347 of_node_put(node: spu->devnode);
348 return 0;
349}
350
351static void enable_spu_by_master_run(struct spu_context *ctx)
352{
353 ctx->ops->master_start(ctx);
354}
355
356static void disable_spu_by_master_run(struct spu_context *ctx)
357{
358 ctx->ops->master_stop(ctx);
359}
360
/* Hardcoded affinity idxs for qs20 */
#define QS20_SPES_PER_BE 8
/* "reg" values of the 8 SPEs, presumably in physical neighbour order
 * on the QS20 board -- board-specific, not derivable from firmware */
static int qs20_reg_idxs[QS20_SPES_PER_BE] = { 0, 2, 4, 6, 7, 5, 3, 1 };
/* indexed by "reg" value: nonzero if that SPE has memory affinity */
static int qs20_reg_memory[QS20_SPES_PER_BE] = { 1, 1, 0, 0, 0, 0, 0, 0 };
365
366static struct spu *__init spu_lookup_reg(int node, u32 reg)
367{
368 struct spu *spu;
369 const u32 *spu_reg;
370
371 list_for_each_entry(spu, &cbe_spu_info[node].spus, cbe_list) {
372 spu_reg = of_get_property(spu_devnode(spu), "reg", NULL);
373 if (*spu_reg == reg)
374 return spu;
375 }
376 return NULL;
377}
378
379static void __init init_affinity_qs20_harcoded(void)
380{
381 int node, i;
382 struct spu *last_spu, *spu;
383 u32 reg;
384
385 for (node = 0; node < MAX_NUMNODES; node++) {
386 last_spu = NULL;
387 for (i = 0; i < QS20_SPES_PER_BE; i++) {
388 reg = qs20_reg_idxs[i];
389 spu = spu_lookup_reg(node, reg);
390 if (!spu)
391 continue;
392 spu->has_mem_affinity = qs20_reg_memory[reg];
393 if (last_spu)
394 list_add_tail(new: &spu->aff_list,
395 head: &last_spu->aff_list);
396 last_spu = spu;
397 }
398 }
399}
400
401static int __init of_has_vicinity(void)
402{
403 struct device_node *dn;
404
405 for_each_node_by_type(dn, "spe") {
406 if (of_property_present(np: dn, propname: "vicinity")) {
407 of_node_put(node: dn);
408 return 1;
409 }
410 }
411 return 0;
412}
413
414static struct spu *__init devnode_spu(int cbe, struct device_node *dn)
415{
416 struct spu *spu;
417
418 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list)
419 if (spu_devnode(spu) == dn)
420 return spu;
421 return NULL;
422}
423
424static struct spu * __init
425neighbour_spu(int cbe, struct device_node *target, struct device_node *avoid)
426{
427 struct spu *spu;
428 struct device_node *spu_dn;
429 const phandle *vic_handles;
430 int lenp, i;
431
432 list_for_each_entry(spu, &cbe_spu_info[cbe].spus, cbe_list) {
433 spu_dn = spu_devnode(spu);
434 if (spu_dn == avoid)
435 continue;
436 vic_handles = of_get_property(spu_dn, "vicinity", &lenp);
437 for (i=0; i < (lenp / sizeof(phandle)); i++) {
438 if (vic_handles[i] == target->phandle)
439 return spu;
440 }
441 }
442 return NULL;
443}
444
445static void __init init_affinity_node(int cbe)
446{
447 struct spu *spu, *last_spu;
448 struct device_node *vic_dn, *last_spu_dn;
449 phandle avoid_ph;
450 const phandle *vic_handles;
451 int lenp, i, added;
452
453 last_spu = list_first_entry(&cbe_spu_info[cbe].spus, struct spu,
454 cbe_list);
455 avoid_ph = 0;
456 for (added = 1; added < cbe_spu_info[cbe].n_spus; added++) {
457 last_spu_dn = spu_devnode(last_spu);
458 vic_handles = of_get_property(last_spu_dn, "vicinity", &lenp);
459
460 /*
461 * Walk through each phandle in vicinity property of the spu
462 * (typically two vicinity phandles per spe node)
463 */
464 for (i = 0; i < (lenp / sizeof(phandle)); i++) {
465 if (vic_handles[i] == avoid_ph)
466 continue;
467
468 vic_dn = of_find_node_by_phandle(vic_handles[i]);
469 if (!vic_dn)
470 continue;
471
472 if (of_node_name_eq(vic_dn, "spe") ) {
473 spu = devnode_spu(cbe, vic_dn);
474 avoid_ph = last_spu_dn->phandle;
475 } else {
476 /*
477 * "mic-tm" and "bif0" nodes do not have
478 * vicinity property. So we need to find the
479 * spe which has vic_dn as neighbour, but
480 * skipping the one we came from (last_spu_dn)
481 */
482 spu = neighbour_spu(cbe, vic_dn, last_spu_dn);
483 if (!spu)
484 continue;
485 if (of_node_name_eq(vic_dn, "mic-tm")) {
486 last_spu->has_mem_affinity = 1;
487 spu->has_mem_affinity = 1;
488 }
489 avoid_ph = vic_dn->phandle;
490 }
491
492 of_node_put(vic_dn);
493
494 list_add_tail(&spu->aff_list, &last_spu->aff_list);
495 last_spu = spu;
496 break;
497 }
498 }
499}
500
501static void __init init_affinity_fw(void)
502{
503 int cbe;
504
505 for (cbe = 0; cbe < MAX_NUMNODES; cbe++)
506 init_affinity_node(cbe);
507}
508
509static int __init init_affinity(void)
510{
511 if (of_has_vicinity()) {
512 init_affinity_fw();
513 } else {
514 if (of_machine_is_compatible(compat: "IBM,CPBW-1.0"))
515 init_affinity_qs20_harcoded();
516 else
517 printk("No affinity configuration found\n");
518 }
519
520 return 0;
521}
522
/* SPU management backend for device-tree based (native) Cell platforms. */
const struct spu_management_ops spu_management_of_ops = {
	.enumerate_spus = of_enumerate_spus,
	.create_spu = of_create_spu,
	.destroy_spu = of_destroy_spu,
	.enable_spu = enable_spu_by_master_run,
	.disable_spu = disable_spu_by_master_run,
	.init_affinity = init_affinity,
};
531

/* source code of linux/arch/powerpc/platforms/cell/spu_manage.c */