/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Coherency fabric: low level functions
 *
 * Copyright (C) 2012 Marvell
 *
 * Gregory CLEMENT <gregory.clement@free-electrons.com>
 *
 * This file implements the assembly function to add a CPU to the
 * coherency fabric. This function is called by each of the secondary
 * CPUs during their early boot in an SMP kernel, which is why this
 * function has to be callable from assembly. It can also be called by
 * a primary CPU from C code during its boot.
 */
15 | |
#include <linux/linkage.h>
/* Offsets of the coherency fabric control/configuration registers */
#define ARMADA_XP_CFB_CTL_REG_OFFSET 0x0
#define ARMADA_XP_CFB_CFG_REG_OFFSET 0x4

#include <asm/assembler.h>
#include <asm/cp15.h>

	.arch armv7-a
	.text
/*
 * Returns the coherency base address in r1 (r0 is untouched), or 0 if
 * the coherency fabric is not enabled.
 *
 * Clobbers: r1, r3, flags. Must work with or without the MMU, and
 * with no stack available (early secondary-CPU boot).
 */
ENTRY(ll_get_coherency_base)
	mrc	p15, 0, r1, c1, c0, 0	@ r1 = SCTLR
	tst	r1, #CR_M		@ Check MMU bit enabled
	bne	1f			@ MMU on: use the virtual address

	/*
	 * MMU is disabled, use the physical address of the coherency
	 * base address, (or 0x0 if the coherency fabric is not mapped).
	 * The literal at 3: holds the link-time offset from itself to
	 * coherency_phys_base, so this PC-relative sequence works at
	 * whatever address the code is currently running from.
	 */
	adr	r1, 3f			@ r1 = runtime address of literal 3:
	ldr	r3, [r1]		@ r3 = offset of coherency_phys_base
	ldr	r1, [r1, r3]		@ r1 = *coherency_phys_base
	b	2f
1:
	/*
	 * MMU is enabled, use the virtual address of the coherency
	 * base address.
	 */
	ldr	r1, =coherency_base
	ldr	r1, [r1]
2:
	ret	lr
ENDPROC(ll_get_coherency_base)
52 | |
/*
 * Returns the coherency CPU mask in r3 (r0 is untouched). This
 * coherency CPU mask can be used with the coherency fabric
 * configuration and control registers. Note that the mask is already
 * endian-swapped as appropriate so that the calling functions do not
 * have to care about endianness issues while accessing the coherency
 * fabric registers.
 *
 * Clobbers: r2, r3.
 */
ENTRY(ll_get_coherency_cpumask)
	mrc	p15, 0, r3, cr0, cr0, 5	@ r3 = MPIDR
	and	r3, r3, #15		@ keep CPU ID (affinity level 0)
	mov	r2, #(1 << 24)		@ per-CPU fabric bits start at bit 24
	lsl	r3, r2, r3		@ r3 = 1 << (24 + cpu_id)
ARM_BE8(rev	r3, r3)			@ byte-swap the mask on BE8 kernels
	ret	lr
ENDPROC(ll_get_coherency_cpumask)
69 | |
/*
 * ll_add_cpu_to_smp_group(), ll_enable_coherency() and
 * ll_disable_coherency() use the strex/ldrex instructions while the
 * MMU can be disabled. The Armada XP SoC has an exclusive monitor
 * that tracks transactions to Device and/or SO memory and thanks to
 * that, exclusive transactions are functional even when the MMU is
 * disabled.
 */

/*
 * Atomically sets this CPU's bit in the coherency fabric
 * configuration register, joining it to the SMP group.
 * Callable with no stack and with the MMU on or off.
 */
ENTRY(ll_add_cpu_to_smp_group)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0			@ return directly via the saved lr
	bl	ll_get_coherency_cpumask
	mov	lr, r0			@ restore the original return address
	add	r0, r1, #ARMADA_XP_CFB_CFG_REG_OFFSET
1:
	ldrex	r2, [r0]		@ read-modify-write: OR this CPU's
	orr	r2, r2, r3		@ mask into the CFG register
	strex	r1, r2, [r0]
	cmp	r1, #0			@ strex failed (r1 != 0)? retry
	bne	1b
	ret	lr
ENDPROC(ll_add_cpu_to_smp_group)
103 | |
/*
 * Atomically sets this CPU's bit in the coherency fabric control
 * register, enabling coherency for it. Returns 0 in r0 on success.
 * Callable with no stack and with the MMU on or off.
 */
ENTRY(ll_enable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0			@ return directly via the saved lr
	bl	ll_get_coherency_cpumask
	mov	lr, r0			@ restore the original return address
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]		@ read-modify-write: OR this CPU's
	orr	r2, r2, r3		@ mask into the CTL register
	strex	r1, r2, [r0]
	cmp	r1, #0			@ strex failed (r1 != 0)? retry
	bne	1b
	dsb				@ ensure the write is visible before
	mov	r0, #0			@ returning success (r0 = 0)
	ret	lr
ENDPROC(ll_enable_coherency)
130 | |
/*
 * Atomically clears this CPU's bit in the coherency fabric control
 * register, disabling coherency for it.
 * Callable with no stack and with the MMU on or off.
 */
ENTRY(ll_disable_coherency)
	/*
	 * As r0 is not modified by ll_get_coherency_base() and
	 * ll_get_coherency_cpumask(), we use it to temporarily save lr
	 * and avoid it being modified by the branch and link
	 * calls. This function is used very early in the secondary
	 * CPU boot, and no stack is available at this point.
	 */
	mov	r0, lr
	bl	ll_get_coherency_base
	/* Bail out if the coherency is not enabled */
	cmp	r1, #0
	reteq	r0			@ return directly via the saved lr
	bl	ll_get_coherency_cpumask
	mov	lr, r0			@ restore the original return address
	add	r0, r1, #ARMADA_XP_CFB_CTL_REG_OFFSET
1:
	ldrex	r2, [r0]		@ read-modify-write: clear this CPU's
	bic	r2, r2, r3		@ mask bit in the CTL register
	strex	r1, r2, [r0]
	cmp	r1, #0			@ strex failed (r1 != 0)? retry
	bne	1b
	dsb				@ ensure the write completed
	ret	lr
ENDPROC(ll_disable_coherency)
156 | |
	.align 2
3:
	/*
	 * Link-time offset from this literal to coherency_phys_base.
	 * Consumed by ll_get_coherency_base (adr 3f) so the physical
	 * base pointer can be found PC-relatively while the MMU is off.
	 */
	.long	coherency_phys_base - .
160 | |