1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * |
4 | * Copyright (C) 2013 Freescale Semiconductor, Inc. |
5 | */ |
6 | |
7 | #define pr_fmt(fmt) "fsl-pamu: %s: " fmt, __func__ |
8 | |
9 | #include "fsl_pamu.h" |
10 | |
11 | #include <linux/fsl/guts.h> |
12 | #include <linux/interrupt.h> |
13 | #include <linux/genalloc.h> |
14 | #include <linux/of_address.h> |
15 | #include <linux/of_irq.h> |
16 | #include <linux/platform_device.h> |
17 | |
18 | #include <asm/mpc85xx.h> |
19 | |
/* define indexes for each operation mapping scenario */
#define OMI_QMAN	0x00
#define OMI_FMAN	0x01
#define OMI_QMAN_PRIV	0x02
#define OMI_CAAM	0x03

/* Combine two 32-bit register halves into a single 64-bit value */
#define make64(high, low) (((u64)(high) << 32) | (low))
27 | |
/* Per-probe context handed to the access-violation ISR (pamu_av_isr) */
struct pamu_isr_data {
	void __iomem *pamu_reg_base;	/* Base address of PAMU regs */
	unsigned int count;		/* The number of PAMUs */
};
32 | |
/* Primary PAACE table, indexed by LIODN; shared by all PAMUs */
static struct paace *ppaact;
/* Secondary PAACE table (SPAACT) */
static struct paace *spaact;

static bool probed;			/* Has PAMU been probed? */
37 | |
38 | /* |
39 | * Table for matching compatible strings, for device tree |
40 | * guts node, for QorIQ SOCs. |
41 | * "fsl,qoriq-device-config-2.0" corresponds to T4 & B4 |
42 | * SOCs. For the older SOCs "fsl,qoriq-device-config-1.0" |
43 | * string would be used. |
44 | */ |
45 | static const struct of_device_id guts_device_ids[] = { |
46 | { .compatible = "fsl,qoriq-device-config-1.0" , }, |
47 | { .compatible = "fsl,qoriq-device-config-2.0" , }, |
48 | {} |
49 | }; |
50 | |
51 | /* |
52 | * Table for matching compatible strings, for device tree |
53 | * L3 cache controller node. |
54 | * "fsl,t4240-l3-cache-controller" corresponds to T4, |
55 | * "fsl,b4860-l3-cache-controller" corresponds to B4 & |
56 | * "fsl,p4080-l3-cache-controller" corresponds to other, |
57 | * SOCs. |
58 | */ |
59 | static const struct of_device_id l3_device_ids[] = { |
60 | { .compatible = "fsl,t4240-l3-cache-controller" , }, |
61 | { .compatible = "fsl,b4860-l3-cache-controller" , }, |
62 | { .compatible = "fsl,p4080-l3-cache-controller" , }, |
63 | {} |
64 | }; |
65 | |
/*
 * maximum subwindows permitted per liodn
 * (read from the PAMU_PC3 register in get_pamu_cap_values())
 */
static u32 max_subwindow_count;
68 | |
69 | /** |
70 | * pamu_get_ppaace() - Return the primary PACCE |
71 | * @liodn: liodn PAACT index for desired PAACE |
72 | * |
73 | * Returns the ppace pointer upon success else return |
74 | * null. |
75 | */ |
76 | static struct paace *pamu_get_ppaace(int liodn) |
77 | { |
78 | if (!ppaact || liodn >= PAACE_NUMBER_ENTRIES) { |
79 | pr_debug("PPAACT doesn't exist\n" ); |
80 | return NULL; |
81 | } |
82 | |
83 | return &ppaact[liodn]; |
84 | } |
85 | |
86 | /** |
87 | * pamu_enable_liodn() - Set valid bit of PACCE |
88 | * @liodn: liodn PAACT index for desired PAACE |
89 | * |
90 | * Returns 0 upon success else error code < 0 returned |
91 | */ |
92 | int pamu_enable_liodn(int liodn) |
93 | { |
94 | struct paace *ppaace; |
95 | |
96 | ppaace = pamu_get_ppaace(liodn); |
97 | if (!ppaace) { |
98 | pr_debug("Invalid primary paace entry\n" ); |
99 | return -ENOENT; |
100 | } |
101 | |
102 | if (!get_bf(ppaace->addr_bitfields, PPAACE_AF_WSE)) { |
103 | pr_debug("liodn %d not configured\n" , liodn); |
104 | return -EINVAL; |
105 | } |
106 | |
107 | /* Ensure that all other stores to the ppaace complete first */ |
108 | mb(); |
109 | |
110 | set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_VALID); |
111 | mb(); |
112 | |
113 | return 0; |
114 | } |
115 | |
116 | /** |
117 | * pamu_disable_liodn() - Clears valid bit of PACCE |
118 | * @liodn: liodn PAACT index for desired PAACE |
119 | * |
120 | * Returns 0 upon success else error code < 0 returned |
121 | */ |
122 | int pamu_disable_liodn(int liodn) |
123 | { |
124 | struct paace *ppaace; |
125 | |
126 | ppaace = pamu_get_ppaace(liodn); |
127 | if (!ppaace) { |
128 | pr_debug("Invalid primary paace entry\n" ); |
129 | return -ENOENT; |
130 | } |
131 | |
132 | set_bf(ppaace->addr_bitfields, PAACE_AF_V, PAACE_V_INVALID); |
133 | mb(); |
134 | |
135 | return 0; |
136 | } |
137 | |
138 | /* Derive the window size encoding for a particular PAACE entry */ |
139 | static unsigned int map_addrspace_size_to_wse(phys_addr_t addrspace_size) |
140 | { |
141 | /* Bug if not a power of 2 */ |
142 | BUG_ON(addrspace_size & (addrspace_size - 1)); |
143 | |
144 | /* window size is 2^(WSE+1) bytes */ |
145 | return fls64(x: addrspace_size) - 2; |
146 | } |
147 | |
148 | /* |
149 | * Set the PAACE type as primary and set the coherency required domain |
150 | * attribute |
151 | */ |
152 | static void pamu_init_ppaace(struct paace *ppaace) |
153 | { |
154 | set_bf(ppaace->addr_bitfields, PAACE_AF_PT, PAACE_PT_PRIMARY); |
155 | |
156 | set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR, |
157 | PAACE_M_COHERENCE_REQ); |
158 | } |
159 | |
160 | /* |
161 | * Function used for updating stash destination for the coressponding |
162 | * LIODN. |
163 | */ |
164 | int pamu_update_paace_stash(int liodn, u32 value) |
165 | { |
166 | struct paace *paace; |
167 | |
168 | paace = pamu_get_ppaace(liodn); |
169 | if (!paace) { |
170 | pr_debug("Invalid liodn entry\n" ); |
171 | return -ENOENT; |
172 | } |
173 | set_bf(paace->impl_attr, PAACE_IA_CID, value); |
174 | |
175 | mb(); |
176 | |
177 | return 0; |
178 | } |
179 | |
180 | /** |
181 | * pamu_config_ppaace() - Sets up PPAACE entry for specified liodn |
182 | * |
183 | * @liodn: Logical IO device number |
184 | * @omi: Operation mapping index -- if ~omi == 0 then omi not defined |
185 | * @stashid: cache stash id for associated cpu -- if ~stashid == 0 then |
186 | * stashid not defined |
187 | * @prot: window permissions |
188 | * |
189 | * Returns 0 upon success else error code < 0 returned |
190 | */ |
191 | int pamu_config_ppaace(int liodn, u32 omi, u32 stashid, int prot) |
192 | { |
193 | struct paace *ppaace; |
194 | |
195 | ppaace = pamu_get_ppaace(liodn); |
196 | if (!ppaace) |
197 | return -ENOENT; |
198 | |
199 | /* window size is 2^(WSE+1) bytes */ |
200 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, |
201 | map_addrspace_size_to_wse(1ULL << 36)); |
202 | |
203 | pamu_init_ppaace(ppaace); |
204 | |
205 | ppaace->wbah = 0; |
206 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); |
207 | |
208 | /* set up operation mapping if it's configured */ |
209 | if (omi < OME_NUMBER_ENTRIES) { |
210 | set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED); |
211 | ppaace->op_encode.index_ot.omi = omi; |
212 | } else if (~omi != 0) { |
213 | pr_debug("bad operation mapping index: %d\n" , omi); |
214 | return -ENODEV; |
215 | } |
216 | |
217 | /* configure stash id */ |
218 | if (~stashid != 0) |
219 | set_bf(ppaace->impl_attr, PAACE_IA_CID, stashid); |
220 | |
221 | set_bf(ppaace->impl_attr, PAACE_IA_ATM, PAACE_ATM_WINDOW_XLATE); |
222 | ppaace->twbah = 0; |
223 | set_bf(ppaace->win_bitfields, PAACE_WIN_TWBAL, 0); |
224 | set_bf(ppaace->addr_bitfields, PAACE_AF_AP, prot); |
225 | set_bf(ppaace->impl_attr, PAACE_IA_WCE, 0); |
226 | set_bf(ppaace->addr_bitfields, PPAACE_AF_MW, 0); |
227 | mb(); |
228 | |
229 | return 0; |
230 | } |
231 | |
232 | /** |
233 | * get_ome_index() - Returns the index in the operation mapping table |
234 | * for device. |
235 | * @omi_index: pointer for storing the index value |
236 | * @dev: target device |
237 | * |
238 | */ |
239 | void get_ome_index(u32 *omi_index, struct device *dev) |
240 | { |
241 | if (of_device_is_compatible(device: dev->of_node, "fsl,qman-portal" )) |
242 | *omi_index = OMI_QMAN; |
243 | if (of_device_is_compatible(device: dev->of_node, "fsl,qman" )) |
244 | *omi_index = OMI_QMAN_PRIV; |
245 | } |
246 | |
247 | /** |
248 | * get_stash_id - Returns stash destination id corresponding to a |
249 | * cache type and vcpu. |
250 | * @stash_dest_hint: L1, L2 or L3 |
251 | * @vcpu: vpcu target for a particular cache type. |
252 | * |
253 | * Returs stash on success or ~(u32)0 on failure. |
254 | * |
255 | */ |
256 | u32 get_stash_id(u32 stash_dest_hint, u32 vcpu) |
257 | { |
258 | const u32 *prop; |
259 | struct device_node *node; |
260 | u32 cache_level; |
261 | int len, found = 0; |
262 | int i; |
263 | |
264 | /* Fastpath, exit early if L3/CPC cache is target for stashing */ |
265 | if (stash_dest_hint == PAMU_ATTR_CACHE_L3) { |
266 | node = of_find_matching_node(NULL, matches: l3_device_ids); |
267 | if (node) { |
268 | prop = of_get_property(node, name: "cache-stash-id" , NULL); |
269 | if (!prop) { |
270 | pr_debug("missing cache-stash-id at %pOF\n" , |
271 | node); |
272 | of_node_put(node); |
273 | return ~(u32)0; |
274 | } |
275 | of_node_put(node); |
276 | return be32_to_cpup(p: prop); |
277 | } |
278 | return ~(u32)0; |
279 | } |
280 | |
281 | for_each_of_cpu_node(node) { |
282 | prop = of_get_property(node, name: "reg" , lenp: &len); |
283 | for (i = 0; i < len / sizeof(u32); i++) { |
284 | if (be32_to_cpup(p: &prop[i]) == vcpu) { |
285 | found = 1; |
286 | goto found_cpu_node; |
287 | } |
288 | } |
289 | } |
290 | found_cpu_node: |
291 | |
292 | /* find the hwnode that represents the cache */ |
293 | for (cache_level = PAMU_ATTR_CACHE_L1; (cache_level < PAMU_ATTR_CACHE_L3) && found; cache_level++) { |
294 | if (stash_dest_hint == cache_level) { |
295 | prop = of_get_property(node, "cache-stash-id" , NULL); |
296 | if (!prop) { |
297 | pr_debug("missing cache-stash-id at %pOF\n" , |
298 | node); |
299 | of_node_put(node); |
300 | return ~(u32)0; |
301 | } |
302 | of_node_put(node); |
303 | return be32_to_cpup(prop); |
304 | } |
305 | |
306 | prop = of_get_property(node, "next-level-cache" , NULL); |
307 | if (!prop) { |
308 | pr_debug("can't find next-level-cache at %pOF\n" , node); |
309 | of_node_put(node); |
310 | return ~(u32)0; /* can't traverse any further */ |
311 | } |
312 | of_node_put(node); |
313 | |
314 | /* advance to next node in cache hierarchy */ |
315 | node = of_find_node_by_phandle(*prop); |
316 | if (!node) { |
317 | pr_debug("Invalid node for cache hierarchy\n" ); |
318 | return ~(u32)0; |
319 | } |
320 | } |
321 | |
322 | pr_debug("stash dest not found for %d on vcpu %d\n" , |
323 | stash_dest_hint, vcpu); |
324 | return ~(u32)0; |
325 | } |
326 | |
/* Identify if the PAACT table entry belongs to QMAN, BMAN or QMAN Portal */
#define QMAN_PAACE 1
#define QMAN_PORTAL_PAACE 2
#define BMAN_PAACE 3

/*
 * Setup operation mapping and stash destinations for QMAN and QMAN portal.
 * Memory accesses to QMAN and BMAN private memory need not be coherent, so
 * clear the PAACE entry coherency attribute for them.
 */
static void setup_qbman_paace(struct paace *ppaace, int paace_type)
{
	switch (paace_type) {
	case QMAN_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN_PRIV;
		/* setup QMAN Private data stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		/* private-memory accesses need not snoop the caches */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	case QMAN_PORTAL_PAACE:
		set_bf(ppaace->impl_attr, PAACE_IA_OTM, PAACE_OTM_INDEXED);
		ppaace->op_encode.index_ot.omi = OMI_QMAN;
		/* Set DQRR and Frame stashing for the L3 cache */
		set_bf(ppaace->impl_attr, PAACE_IA_CID, get_stash_id(PAMU_ATTR_CACHE_L3, 0));
		break;
	case BMAN_PAACE:
		/* BMAN private memory: coherency not required */
		set_bf(ppaace->domain_attr.to_host.coherency_required, PAACE_DA_HOST_CR,
		       0);
		break;
	/* any other paace_type leaves the entry untouched */
	}
}
360 | |
361 | /* |
362 | * Setup the operation mapping table for various devices. This is a static |
363 | * table where each table index corresponds to a particular device. PAMU uses |
364 | * this table to translate device transaction to appropriate corenet |
365 | * transaction. |
366 | */ |
367 | static void setup_omt(struct ome *omt) |
368 | { |
369 | struct ome *ome; |
370 | |
371 | /* Configure OMI_QMAN */ |
372 | ome = &omt[OMI_QMAN]; |
373 | |
374 | ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ; |
375 | ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA; |
376 | ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE; |
377 | ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSAO; |
378 | |
379 | ome->moe[IOE_DIRECT0_IDX] = EOE_VALID | EOE_LDEC; |
380 | ome->moe[IOE_DIRECT1_IDX] = EOE_VALID | EOE_LDECPE; |
381 | |
382 | /* Configure OMI_FMAN */ |
383 | ome = &omt[OMI_FMAN]; |
384 | ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI; |
385 | ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE; |
386 | |
387 | /* Configure OMI_QMAN private */ |
388 | ome = &omt[OMI_QMAN_PRIV]; |
389 | ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READ; |
390 | ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE; |
391 | ome->moe[IOE_EREAD0_IDX] = EOE_VALID | EOE_RSA; |
392 | ome->moe[IOE_EWRITE0_IDX] = EOE_VALID | EOE_WWSA; |
393 | |
394 | /* Configure OMI_CAAM */ |
395 | ome = &omt[OMI_CAAM]; |
396 | ome->moe[IOE_READ_IDX] = EOE_VALID | EOE_READI; |
397 | ome->moe[IOE_WRITE_IDX] = EOE_VALID | EOE_WRITE; |
398 | } |
399 | |
400 | /* |
401 | * Get the maximum number of PAACT table entries |
402 | * and subwindows supported by PAMU |
403 | */ |
404 | static void get_pamu_cap_values(unsigned long pamu_reg_base) |
405 | { |
406 | u32 pc_val; |
407 | |
408 | pc_val = in_be32((u32 *)(pamu_reg_base + PAMU_PC3)); |
409 | /* Maximum number of subwindows per liodn */ |
410 | max_subwindow_count = 1 << (1 + PAMU_PC3_MWCE(pc_val)); |
411 | } |
412 | |
/* Setup PAMU registers pointing to PAACT, SPAACT and OMT */
static int setup_one_pamu(unsigned long pamu_reg_base, unsigned long pamu_reg_size,
			  phys_addr_t ppaact_phys, phys_addr_t spaact_phys,
			  phys_addr_t omt_phys)
{
	u32 *pc;
	struct pamu_mmap_regs *pamu_regs;

	/* NOTE(review): pamu_reg_size is currently unused by this function */
	pc = (u32 *) (pamu_reg_base + PAMU_PC);
	pamu_regs = (struct pamu_mmap_regs *)
		(pamu_reg_base + PAMU_MMAP_REGS_BASE);

	/* set up pointers to corenet control blocks */

	/* PPAACT: base, then limit = base + PAACT_SIZE */
	out_be32(&pamu_regs->ppbah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->ppbal, lower_32_bits(ppaact_phys));
	ppaact_phys = ppaact_phys + PAACT_SIZE;
	out_be32(&pamu_regs->pplah, upper_32_bits(ppaact_phys));
	out_be32(&pamu_regs->pplal, lower_32_bits(ppaact_phys));

	/* SPAACT: base, then limit = base + SPAACT_SIZE */
	out_be32(&pamu_regs->spbah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->spbal, lower_32_bits(spaact_phys));
	spaact_phys = spaact_phys + SPAACT_SIZE;
	out_be32(&pamu_regs->splah, upper_32_bits(spaact_phys));
	out_be32(&pamu_regs->splal, lower_32_bits(spaact_phys));

	/* OMT: base, then limit = base + OMT_SIZE */
	out_be32(&pamu_regs->obah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->obal, lower_32_bits(omt_phys));
	omt_phys = omt_phys + OMT_SIZE;
	out_be32(&pamu_regs->olah, upper_32_bits(omt_phys));
	out_be32(&pamu_regs->olal, lower_32_bits(omt_phys));

	/*
	 * set PAMU enable bit,
	 * allow ppaact & omt to be cached
	 * & enable PAMU access violation interrupts.
	 */

	out_be32((u32 *)(pamu_reg_base + PAMU_PICS),
		 PAMU_ACCESS_VIOLATION_ENABLE);
	out_be32(pc, PAMU_PC_PE | PAMU_PC_OCE | PAMU_PC_SPCC | PAMU_PC_PPCC);
	return 0;
}
456 | |
457 | /* Enable all device LIODNS */ |
458 | static void setup_liodns(void) |
459 | { |
460 | int i, len; |
461 | struct paace *ppaace; |
462 | struct device_node *node = NULL; |
463 | const u32 *prop; |
464 | |
465 | for_each_node_with_property(node, "fsl,liodn" ) { |
466 | prop = of_get_property(node, name: "fsl,liodn" , lenp: &len); |
467 | for (i = 0; i < len / sizeof(u32); i++) { |
468 | int liodn; |
469 | |
470 | liodn = be32_to_cpup(p: &prop[i]); |
471 | if (liodn >= PAACE_NUMBER_ENTRIES) { |
472 | pr_debug("Invalid LIODN value %d\n" , liodn); |
473 | continue; |
474 | } |
475 | ppaace = pamu_get_ppaace(liodn); |
476 | pamu_init_ppaace(ppaace); |
477 | /* window size is 2^(WSE+1) bytes */ |
478 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WSE, 35); |
479 | ppaace->wbah = 0; |
480 | set_bf(ppaace->addr_bitfields, PPAACE_AF_WBAL, 0); |
481 | set_bf(ppaace->impl_attr, PAACE_IA_ATM, |
482 | PAACE_ATM_NO_XLATE); |
483 | set_bf(ppaace->addr_bitfields, PAACE_AF_AP, |
484 | PAACE_AP_PERMS_ALL); |
485 | if (of_device_is_compatible(device: node, "fsl,qman-portal" )) |
486 | setup_qbman_paace(ppaace, QMAN_PORTAL_PAACE); |
487 | if (of_device_is_compatible(device: node, "fsl,qman" )) |
488 | setup_qbman_paace(ppaace, QMAN_PAACE); |
489 | if (of_device_is_compatible(device: node, "fsl,bman" )) |
490 | setup_qbman_paace(ppaace, BMAN_PAACE); |
491 | mb(); |
492 | pamu_enable_liodn(liodn); |
493 | } |
494 | } |
495 | } |
496 | |
/*
 * PAMU access-violation interrupt handler: dumps the violation status
 * registers of every PAMU that flagged a violation, then either disables
 * the offending LIODN or (per erratum A-003638) turns off violation
 * reporting when the LIODN was already disabled.
 */
static irqreturn_t pamu_av_isr(int irq, void *arg)
{
	struct pamu_isr_data *data = arg;
	phys_addr_t phys;
	unsigned int i, j, ret;

	pr_emerg("access violation interrupt\n");

	for (i = 0; i < data->count; i++) {
		void __iomem *p = data->pamu_reg_base + i * PAMU_OFFSET;
		u32 pics = in_be32(p + PAMU_PICS);

		if (pics & PAMU_ACCESS_VIOLATION_STAT) {
			u32 avs1 = in_be32(p + PAMU_AVS1);
			struct paace *paace;

			/* Dump the per-PAMU violation status registers */
			pr_emerg("POES1=%08x\n", in_be32(p + PAMU_POES1));
			pr_emerg("POES2=%08x\n", in_be32(p + PAMU_POES2));
			pr_emerg("AVS1=%08x\n", avs1);
			pr_emerg("AVS2=%08x\n", in_be32(p + PAMU_AVS2));
			pr_emerg("AVA=%016llx\n",
				 make64(in_be32(p + PAMU_AVAH),
					in_be32(p + PAMU_AVAL)));
			pr_emerg("UDAD=%08x\n", in_be32(p + PAMU_UDAD));
			pr_emerg("POEA=%016llx\n",
				 make64(in_be32(p + PAMU_POEAH),
					in_be32(p + PAMU_POEAL)));

			phys = make64(in_be32(p + PAMU_POEAH),
				      in_be32(p + PAMU_POEAL));

			/* Assume that POEA points to a PAACE */
			if (phys) {
				u32 *paace = phys_to_virt(phys);

				/* Only the first four words are relevant */
				for (j = 0; j < 4; j++)
					pr_emerg("PAACE[%u]=%08x\n",
						 j, in_be32(paace + j));
			}

			/* clear access violation condition */
			out_be32(p + PAMU_AVS1, avs1 & PAMU_AV_MASK);
			/* the faulting LIODN sits in the upper AVS1 bits */
			paace = pamu_get_ppaace(avs1 >> PAMU_AVS1_LIODN_SHIFT);
			BUG_ON(!paace);
			/* check if we got a violation for a disabled LIODN */
			if (!get_bf(paace->addr_bitfields, PAACE_AF_V)) {
				/*
				 * As per hardware erratum A-003638, access
				 * violation can be reported for a disabled
				 * LIODN. If we hit that condition, disable
				 * access violation reporting.
				 */
				pics &= ~PAMU_ACCESS_VIOLATION_ENABLE;
			} else {
				/* Disable the LIODN */
				ret = pamu_disable_liodn(avs1 >> PAMU_AVS1_LIODN_SHIFT);
				BUG_ON(ret);
				pr_emerg("Disabling liodn %x\n",
					 avs1 >> PAMU_AVS1_LIODN_SHIFT);
			}
			out_be32((p + PAMU_PICS), pics);
		}
	}

	return IRQ_HANDLED;
}
564 | |
/* Local Access Window (LAW) attribute register (LAWAR) layout */
#define LAWAR_EN		0x80000000	/* window enable bit */
#define LAWAR_TARGET_MASK	0x0FF00000
#define LAWAR_TARGET_SHIFT	20
#define LAWAR_SIZE_MASK		0x0000003F
#define LAWAR_CSDID_MASK	0x000FF000	/* coherence subdomain id */
#define LAWAR_CSDID_SHIFT	12

/* LAWAR size encoding for 4 KiB: window size = 2^(encoding + 1) bytes */
#define LAW_SIZE_4K		0xb

/* One Local Access Window register set in CCSR space */
struct ccsr_law {
	u32	lawbarh;	/* LAWn base address high */
	u32	lawbarl;	/* LAWn base address low */
	u32	lawar;		/* LAWn attributes */
	u32	reserved;
};
580 | |
581 | /* |
582 | * Create a coherence subdomain for a given memory block. |
583 | */ |
584 | static int create_csd(phys_addr_t phys, size_t size, u32 csd_port_id) |
585 | { |
586 | struct device_node *np; |
587 | const __be32 *iprop; |
588 | void __iomem *lac = NULL; /* Local Access Control registers */ |
589 | struct ccsr_law __iomem *law; |
590 | void __iomem *ccm = NULL; |
591 | u32 __iomem *csdids; |
592 | unsigned int i, num_laws, num_csds; |
593 | u32 law_target = 0; |
594 | u32 csd_id = 0; |
595 | int ret = 0; |
596 | |
597 | np = of_find_compatible_node(NULL, NULL, compat: "fsl,corenet-law" ); |
598 | if (!np) |
599 | return -ENODEV; |
600 | |
601 | iprop = of_get_property(node: np, name: "fsl,num-laws" , NULL); |
602 | if (!iprop) { |
603 | ret = -ENODEV; |
604 | goto error; |
605 | } |
606 | |
607 | num_laws = be32_to_cpup(p: iprop); |
608 | if (!num_laws) { |
609 | ret = -ENODEV; |
610 | goto error; |
611 | } |
612 | |
613 | lac = of_iomap(node: np, index: 0); |
614 | if (!lac) { |
615 | ret = -ENODEV; |
616 | goto error; |
617 | } |
618 | |
619 | /* LAW registers are at offset 0xC00 */ |
620 | law = lac + 0xC00; |
621 | |
622 | of_node_put(node: np); |
623 | |
624 | np = of_find_compatible_node(NULL, NULL, compat: "fsl,corenet-cf" ); |
625 | if (!np) { |
626 | ret = -ENODEV; |
627 | goto error; |
628 | } |
629 | |
630 | iprop = of_get_property(node: np, name: "fsl,ccf-num-csdids" , NULL); |
631 | if (!iprop) { |
632 | ret = -ENODEV; |
633 | goto error; |
634 | } |
635 | |
636 | num_csds = be32_to_cpup(p: iprop); |
637 | if (!num_csds) { |
638 | ret = -ENODEV; |
639 | goto error; |
640 | } |
641 | |
642 | ccm = of_iomap(node: np, index: 0); |
643 | if (!ccm) { |
644 | ret = -ENOMEM; |
645 | goto error; |
646 | } |
647 | |
648 | /* The undocumented CSDID registers are at offset 0x600 */ |
649 | csdids = ccm + 0x600; |
650 | |
651 | of_node_put(node: np); |
652 | np = NULL; |
653 | |
654 | /* Find an unused coherence subdomain ID */ |
655 | for (csd_id = 0; csd_id < num_csds; csd_id++) { |
656 | if (!csdids[csd_id]) |
657 | break; |
658 | } |
659 | |
660 | /* Store the Port ID in the (undocumented) proper CIDMRxx register */ |
661 | csdids[csd_id] = csd_port_id; |
662 | |
663 | /* Find the DDR LAW that maps to our buffer. */ |
664 | for (i = 0; i < num_laws; i++) { |
665 | if (law[i].lawar & LAWAR_EN) { |
666 | phys_addr_t law_start, law_end; |
667 | |
668 | law_start = make64(law[i].lawbarh, law[i].lawbarl); |
669 | law_end = law_start + |
670 | (2ULL << (law[i].lawar & LAWAR_SIZE_MASK)); |
671 | |
672 | if (law_start <= phys && phys < law_end) { |
673 | law_target = law[i].lawar & LAWAR_TARGET_MASK; |
674 | break; |
675 | } |
676 | } |
677 | } |
678 | |
679 | if (i == 0 || i == num_laws) { |
680 | /* This should never happen */ |
681 | ret = -ENOENT; |
682 | goto error; |
683 | } |
684 | |
685 | /* Find a free LAW entry */ |
686 | while (law[--i].lawar & LAWAR_EN) { |
687 | if (i == 0) { |
688 | /* No higher priority LAW slots available */ |
689 | ret = -ENOENT; |
690 | goto error; |
691 | } |
692 | } |
693 | |
694 | law[i].lawbarh = upper_32_bits(phys); |
695 | law[i].lawbarl = lower_32_bits(phys); |
696 | wmb(); |
697 | law[i].lawar = LAWAR_EN | law_target | (csd_id << LAWAR_CSDID_SHIFT) | |
698 | (LAW_SIZE_4K + get_order(size)); |
699 | wmb(); |
700 | |
701 | error: |
702 | if (ccm) |
703 | iounmap(addr: ccm); |
704 | |
705 | if (lac) |
706 | iounmap(addr: lac); |
707 | |
708 | if (np) |
709 | of_node_put(node: np); |
710 | |
711 | return ret; |
712 | } |
713 | |
714 | /* |
715 | * Table of SVRs and the corresponding PORT_ID values. Port ID corresponds to a |
716 | * bit map of snoopers for a given range of memory mapped by a LAW. |
717 | * |
718 | * All future CoreNet-enabled SOCs will have this erratum(A-004510) fixed, so this |
719 | * table should never need to be updated. SVRs are guaranteed to be unique, so |
720 | * there is no worry that a future SOC will inadvertently have one of these |
721 | * values. |
722 | */ |
723 | static const struct { |
724 | u32 svr; |
725 | u32 port_id; |
726 | } port_id_map[] = { |
727 | {(SVR_P2040 << 8) | 0x10, 0xFF000000}, /* P2040 1.0 */ |
728 | {(SVR_P2040 << 8) | 0x11, 0xFF000000}, /* P2040 1.1 */ |
729 | {(SVR_P2041 << 8) | 0x10, 0xFF000000}, /* P2041 1.0 */ |
730 | {(SVR_P2041 << 8) | 0x11, 0xFF000000}, /* P2041 1.1 */ |
731 | {(SVR_P3041 << 8) | 0x10, 0xFF000000}, /* P3041 1.0 */ |
732 | {(SVR_P3041 << 8) | 0x11, 0xFF000000}, /* P3041 1.1 */ |
733 | {(SVR_P4040 << 8) | 0x20, 0xFFF80000}, /* P4040 2.0 */ |
734 | {(SVR_P4080 << 8) | 0x20, 0xFFF80000}, /* P4080 2.0 */ |
735 | {(SVR_P5010 << 8) | 0x10, 0xFC000000}, /* P5010 1.0 */ |
736 | {(SVR_P5010 << 8) | 0x20, 0xFC000000}, /* P5010 2.0 */ |
737 | {(SVR_P5020 << 8) | 0x10, 0xFC000000}, /* P5020 1.0 */ |
738 | {(SVR_P5021 << 8) | 0x10, 0xFF800000}, /* P5021 1.0 */ |
739 | {(SVR_P5040 << 8) | 0x10, 0xFF800000}, /* P5040 1.0 */ |
740 | }; |
741 | |
#define SVR_SECURITY 0x80000	/* The Security (E) bit */

/*
 * Probe the (single) PAMU platform device: map the register block, hook
 * the access-violation IRQ, allocate one contiguous PAACT+SPAACT+OMT
 * buffer, apply the A-004510 coherence-subdomain workaround on affected
 * SOCs, program every PAMU instance, and finally enable all LIODNs found
 * in the device tree.
 */
static int fsl_pamu_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *pamu_regs = NULL;
	struct ccsr_guts __iomem *guts_regs = NULL;
	u32 pamubypenr, pamu_counter;
	unsigned long pamu_reg_off;
	unsigned long pamu_reg_base;
	struct pamu_isr_data *data = NULL;
	struct device_node *guts_node;
	u64 size;
	struct page *p;
	int ret = 0;
	int irq;
	phys_addr_t ppaact_phys;
	phys_addr_t spaact_phys;
	struct ome *omt;
	phys_addr_t omt_phys;
	size_t mem_size = 0;
	unsigned int order = 0;
	u32 csd_port_id = 0;
	unsigned i;
	/*
	 * enumerate all PAMUs and allocate and setup PAMU tables
	 * for each of them,
	 * NOTE : All PAMUs share the same LIODN tables.
	 */

	if (WARN_ON(probed))
		return -EBUSY;

	pamu_regs = of_iomap(dev->of_node, 0);
	if (!pamu_regs) {
		dev_err(dev, "ioremap of PAMU node failed\n");
		return -ENOMEM;
	}
	/* total size of the register block; used below to count PAMUs */
	of_get_address(dev->of_node, 0, &size, NULL);

	irq = irq_of_parse_and_map(dev->of_node, 0);
	if (!irq) {
		dev_warn(dev, "no interrupts listed in PAMU node\n");
		goto error;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data) {
		ret = -ENOMEM;
		goto error;
	}
	data->pamu_reg_base = pamu_regs;
	data->count = size / PAMU_OFFSET;

	/* The ISR needs access to the regs, so we won't iounmap them */
	ret = request_irq(irq, pamu_av_isr, 0, "pamu", data);
	if (ret < 0) {
		dev_err(dev, "error %i installing ISR for irq %i\n", ret, irq);
		/*
		 * NOTE(review): the error path below calls free_irq() for any
		 * non-zero irq even when request_irq() failed here - verify.
		 */
		goto error;
	}

	guts_node = of_find_matching_node(NULL, guts_device_ids);
	if (!guts_node) {
		dev_err(dev, "could not find GUTS node %pOF\n", dev->of_node);
		ret = -ENODEV;
		goto error;
	}

	guts_regs = of_iomap(guts_node, 0);
	of_node_put(guts_node);
	if (!guts_regs) {
		dev_err(dev, "ioremap of GUTS node failed\n");
		ret = -ENODEV;
		goto error;
	}

	/* read in the PAMU capability registers */
	get_pamu_cap_values((unsigned long)pamu_regs);
	/*
	 * To simplify the allocation of a coherency domain, we allocate the
	 * PAACT and the OMT in the same memory buffer. Unfortunately, this
	 * wastes more memory compared to allocating the buffers separately.
	 */
	/* Determine how much memory we need */
	mem_size = (PAGE_SIZE << get_order(PAACT_SIZE)) +
		(PAGE_SIZE << get_order(SPAACT_SIZE)) +
		(PAGE_SIZE << get_order(OMT_SIZE));
	order = get_order(mem_size);

	p = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!p) {
		dev_err(dev, "unable to allocate PAACT/SPAACT/OMT block\n");
		ret = -ENOMEM;
		goto error;
	}

	ppaact = page_address(p);
	ppaact_phys = page_to_phys(p);

	/* Make sure the memory is naturally aligned */
	if (ppaact_phys & ((PAGE_SIZE << order) - 1)) {
		dev_err(dev, "PAACT/OMT block is unaligned\n");
		ret = -ENOMEM;
		goto error;
	}

	/* carve the single allocation into PAACT, SPAACT and OMT regions */
	spaact = (void *)ppaact + (PAGE_SIZE << get_order(PAACT_SIZE));
	omt = (void *)spaact + (PAGE_SIZE << get_order(SPAACT_SIZE));

	dev_dbg(dev, "ppaact virt=%p phys=%pa\n", ppaact, &ppaact_phys);

	/* Check to see if we need to implement the work-around on this SOC */

	/* Determine the Port ID for our coherence subdomain */
	for (i = 0; i < ARRAY_SIZE(port_id_map); i++) {
		if (port_id_map[i].svr == (mfspr(SPRN_SVR) & ~SVR_SECURITY)) {
			csd_port_id = port_id_map[i].port_id;
			dev_dbg(dev, "found matching SVR %08x\n",
				port_id_map[i].svr);
			break;
		}
	}

	if (csd_port_id) {
		dev_dbg(dev, "creating coherency subdomain at address %pa, size %zu, port id 0x%08x",
			&ppaact_phys, mem_size, csd_port_id);

		ret = create_csd(ppaact_phys, mem_size, csd_port_id);
		if (ret) {
			dev_err(dev, "could not create coherence subdomain\n");
			goto error;
		}
	}

	spaact_phys = virt_to_phys(spaact);
	omt_phys = virt_to_phys(omt);

	pamubypenr = in_be32(&guts_regs->pamubypenr);

	/* program every PAMU instance in the register block */
	for (pamu_reg_off = 0, pamu_counter = 0x80000000; pamu_reg_off < size;
	     pamu_reg_off += PAMU_OFFSET, pamu_counter >>= 1) {

		pamu_reg_base = (unsigned long)pamu_regs + pamu_reg_off;
		/*
		 * NOTE(review): pamu_reg_off is passed as the (unused)
		 * pamu_reg_size argument of setup_one_pamu() - confirm.
		 */
		setup_one_pamu(pamu_reg_base, pamu_reg_off, ppaact_phys,
			       spaact_phys, omt_phys);
		/* Disable PAMU bypass for this PAMU */
		pamubypenr &= ~pamu_counter;
	}

	setup_omt(omt);

	/* Enable all relevant PAMU(s) */
	out_be32(&guts_regs->pamubypenr, pamubypenr);

	iounmap(guts_regs);

	/* Enable DMA for the LIODNs in the device tree */

	setup_liodns();

	probed = true;

	return 0;

error:
	if (irq)
		free_irq(irq, data);

	kfree_sensitive(data);

	if (pamu_regs)
		iounmap(pamu_regs);

	if (guts_regs)
		iounmap(guts_regs);

	if (ppaact)
		free_pages((unsigned long)ppaact, order);

	ppaact = NULL;

	return ret;
}
925 | |
/* Registered and bound manually from fsl_pamu_init() below */
static struct platform_driver fsl_of_pamu_driver = {
	.driver = {
		.name = "fsl-of-pamu",
	},
	.probe = fsl_pamu_probe,
};
932 | |
static __init int fsl_pamu_init(void)
{
	struct platform_device *pdev = NULL;
	struct device_node *np;
	int ret;

	/*
	 * The normal OF process calls the probe function at some
	 * indeterminate later time, after most drivers have loaded. This is
	 * too late for us, because PAMU clients (like the Qman driver)
	 * depend on PAMU being initialized early.
	 *
	 * So instead, we "manually" call our probe function by creating the
	 * platform devices ourselves.
	 */

	/*
	 * We assume that there is only one PAMU node in the device tree. A
	 * single PAMU node represents all of the PAMU devices in the SOC
	 * already. Everything else already makes that assumption, and the
	 * binding for the PAMU nodes doesn't allow for any parent-child
	 * relationships anyway. In other words, support for more than one
	 * PAMU node would require significant changes to a lot of code.
	 */

	np = of_find_compatible_node(NULL, NULL, "fsl,pamu");
	if (!np) {
		pr_err("could not find a PAMU node\n");
		return -ENODEV;
	}

	ret = platform_driver_register(&fsl_of_pamu_driver);
	if (ret) {
		pr_err("could not register driver (err=%i)\n", ret);
		goto error_driver_register;
	}

	pdev = platform_device_alloc("fsl-of-pamu", 0);
	if (!pdev) {
		pr_err("could not allocate device %pOF\n", np);
		ret = -ENOMEM;
		goto error_device_alloc;
	}
	pdev->dev.of_node = of_node_get(np);

	ret = pamu_domain_init();
	if (ret)
		goto error_device_add;

	/* adding the device triggers fsl_pamu_probe() synchronously */
	ret = platform_device_add(pdev);
	if (ret) {
		pr_err("could not add device %pOF (err=%i)\n", np, ret);
		goto error_device_add;
	}

	/*
	 * NOTE(review): the reference taken on np by
	 * of_find_compatible_node() is not dropped on this success
	 * path - verify whether that is intentional.
	 */
	return 0;

	/* Unwind in reverse order of the setup steps above */
error_device_add:
	of_node_put(pdev->dev.of_node);
	pdev->dev.of_node = NULL;

	platform_device_put(pdev);

error_device_alloc:
	platform_driver_unregister(&fsl_of_pamu_driver);

error_driver_register:
	of_node_put(np);

	return ret;
}
arch_initcall(fsl_pamu_init);
1005 | |