/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * This file contains low level CPU setup functions.
 * Kumar Gala <galak@kernel.crashing.org>
 * Copyright 2009 Freescale Semiconductor, Inc.
 *
 * Based on cpu_setup_6xx code by
 * Benjamin Herrenschmidt <benh@kernel.crashing.org>
 */
10
11#include <linux/linkage.h>
12
13#include <asm/page.h>
14#include <asm/processor.h>
15#include <asm/cputable.h>
16#include <asm/ppc_asm.h>
17#include <asm/nohash/mmu-e500.h>
18#include <asm/asm-offsets.h>
19#include <asm/mpc85xx.h>
20
/*
 * __e500_icache_setup: enable the L1 instruction cache.
 *
 * Returns immediately if L1CSR1[ICE] is already set.  Otherwise writes
 * L1CSR1 once with the flash-invalidate, lock-flash-clear and enable
 * bits (plus CPE -- presumably cache parity enable, going by the
 * constant's name).  isync makes the new cache state take effect before
 * returning.
 * Clobbers: r0, r3, cr0.
 */
_GLOBAL(__e500_icache_setup)
	mfspr	r0, SPRN_L1CSR1
	andi.	r3, r0, L1CSR1_ICE
	bnelr				/* Already enabled */
	oris	r0, r0, L1CSR1_CPE@h
	ori	r0, r0, (L1CSR1_ICFI | L1CSR1_ICLFR | L1CSR1_ICE)
	mtspr	SPRN_L1CSR1, r0		/* Enable I-Cache */
	isync
	blr
30
/*
 * __e500_dcache_setup: enable the L1 data cache.
 *
 * Returns immediately if L1CSR0[DCE] is already set.  Otherwise:
 * disable the cache, flash-invalidate it and request a lock-bit flash
 * clear, spin until hardware drops L1CSR0[CLFC], then enable the cache
 * (with CPE).  Each L1CSR0 update is fenced with msync/isync.
 * Clobbers: r0, r3, cr0.
 */
_GLOBAL(__e500_dcache_setup)
	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_DCE
	bnelr				/* Already enabled */
	msync
	isync
	li	r0, 0
	mtspr	SPRN_L1CSR0, r0		/* Disable */
	msync
	isync
	li	r0, (L1CSR0_DCFI | L1CSR0_CLFC)
	mtspr	SPRN_L1CSR0, r0		/* Invalidate */
	isync
1:	mfspr	r0, SPRN_L1CSR0
	andi.	r3, r0, L1CSR0_CLFC
	bne+	1b			/* Wait for lock bits reset */
	oris	r0, r0, L1CSR0_CPE@h
	ori	r0, r0, L1CSR0_DCE
	msync
	isync
	mtspr	SPRN_L1CSR0, r0		/* Enable */
	isync
	blr
54
/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for PW20_WAIT_IDLE_BIT.
 */
#define PW20_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHZ */
/*
 * setup_pw20_idle: enable automatic entry into the PW20 power state.
 *
 * Sets PWRMGTCR0[PW20_WAIT] and programs the automatic PW20 core idle
 * count field (PW20_ENT) from PW20_WAIT_IDLE_BIT.
 * Clobbers: r3, r11.
 */
_GLOBAL(setup_pw20_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Set PW20_WAIT bit, enable pw20 state*/
	ori	r3, r3, PWRMGTCR0_PW20_WAIT
	li	r11, PW20_WAIT_IDLE_BIT

	/* Set Automatic PW20 Core Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_PW20_ENT_SHIFT, PWRMGTCR0_PW20_ENT

	mtspr	SPRN_PWRMGTCR0, r3

	blr
73
/*
 * FIXME - we haven't yet done testing to determine a reasonable default
 * value for AV_WAIT_IDLE_BIT.
 */
#define AV_WAIT_IDLE_BIT		50 /* 1ms, TB frequency is 41.66MHZ */
/*
 * setup_altivec_idle: enable automatic AltiVec-unit idle power-down.
 *
 * Sets PWRMGTCR0[AV_IDLE_PD_EN] and programs the automatic AltiVec
 * idle count field (AV_IDLE_CNT) from AV_WAIT_IDLE_BIT.
 * Clobbers: r3, r11.
 */
_GLOBAL(setup_altivec_idle)
	mfspr	r3, SPRN_PWRMGTCR0

	/* Enable Altivec Idle */
	oris	r3, r3, PWRMGTCR0_AV_IDLE_PD_EN@h
	li	r11, AV_WAIT_IDLE_BIT

	/* Set Automatic AltiVec Idle Count */
	rlwimi	r3, r11, PWRMGTCR0_AV_IDLE_CNT_SHIFT, PWRMGTCR0_AV_IDLE_CNT

	mtspr	SPRN_PWRMGTCR0, r3

	blr
92
93#ifdef CONFIG_PPC_E500MC
/*
 * __setup_cpu_e6500: boot-time CPU setup for e6500 cores.
 *
 * On 64-bit, installs the AltiVec IVORs and -- only when
 * MMUCFG[LPIDSIZE] is non-zero, i.e. the CPU implements the E.HV
 * category -- the LRAT IVOR (IVOR42).  Then enables the PW20 and
 * AltiVec idle power states and chains to the common e5500 setup.
 * LR is preserved in r6 across the helper calls.
 */
_GLOBAL(__setup_cpu_e6500)
	mflr	r6
#ifdef CONFIG_PPC64
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
#endif
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__setup_cpu_e5500
	mtlr	r6
	blr
110#endif /* CONFIG_PPC_E500MC */
111
112#ifdef CONFIG_PPC32
113#ifdef CONFIG_PPC_E500
114#ifndef CONFIG_PPC_E500MC
/*
 * __setup_cpu_e500v1 / __setup_cpu_e500v2: 32-bit e500v1/v2 CPU setup.
 *
 * Enables both L1 caches and installs the e500 IVORs.  When RapidIO
 * or Freescale PCI support is built in, additionally sets HID1[RFXE]
 * (per the config guard, needed for those buses -- see the comment
 * below).  LR is preserved in r4 across the helper calls.
 */
_GLOBAL(__setup_cpu_e500v1)
_GLOBAL(__setup_cpu_e500v2)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500_ivors
#if defined(CONFIG_FSL_RIO) || defined(CONFIG_FSL_PCI)
	/* Ensure that RFXE is set */
	mfspr	r3,SPRN_HID1
	oris	r3,r3,HID1_RFXE@h
	mtspr	SPRN_HID1,r3
#endif
	mtlr	r4
	blr
129#else /* CONFIG_PPC_E500MC */
/*
 * __setup_cpu_e500mc / __setup_cpu_e5500 (32-bit): e500mc-family setup.
 *
 * Enables both L1 caches and installs the e500mc IVORs.  If the core
 * implements category E.HV (MMUCFG[LPIDSIZE] != 0) the hypervisor
 * IVORs are installed too; otherwise CPU_FTR_EMB_HV is cleared from
 * the cpu_spec feature word (r4 = cpu_spec pointer, per the
 * CPU_SPEC_FEATURES offset used below).
 * LR is preserved in r5; clobbers r3, r6, r10 (via helpers), cr0.
 */
_GLOBAL(__setup_cpu_e500mc)
_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_e500mc_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r3, SPRN_MMUCFG
	rlwinm.	r3, r3, 0, MMUCFG_LPIDSIZE
	beq	1f
	bl	__setup_ehv_ivors
	b	2f
1:
	lwz	r3, CPU_SPEC_FEATURES(r4)
	/* We need this check as cpu_setup is also called for
	 * the secondary cores. So, if we have already cleared
	 * the feature on the primary core, avoid doing it on the
	 * secondary core.
	 */
	andi.	r6, r3, CPU_FTR_EMB_HV
	beq	2f
	rlwinm	r3, r3, 0, ~CPU_FTR_EMB_HV
	stw	r3, CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
160#endif /* CONFIG_PPC_E500MC */
161#endif /* CONFIG_PPC_E500 */
162#endif /* CONFIG_PPC32 */
163
164#ifdef CONFIG_PPC_BOOK3E_64
/*
 * __restore_cpu_e6500: re-apply e6500 setup (e.g. after a low-power
 * state or on a secondary core being brought back up -- same steps as
 * __setup_cpu_e6500, but chaining to the restore variant of the e5500
 * path).  AltiVec IVORs are installed, and the LRAT IVOR only when
 * MMUCFG[LPIDSIZE] indicates category E.HV support.
 * LR is preserved in r5 across the helper calls.
 */
_GLOBAL(__restore_cpu_e6500)
	mflr	r5
	bl	setup_altivec_ivors
	/* Touch IVOR42 only if the CPU supports E.HV category */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_lrat_ivor
1:
	bl	setup_pw20_idle
	bl	setup_altivec_idle
	bl	__restore_cpu_e5500
	mtlr	r5
	blr
179
/*
 * __restore_cpu_e5500: re-apply e5500 setup.
 *
 * Enables both L1 caches and re-installs the base, performance-monitor
 * and doorbell IVORs.  The hypervisor IVORs are installed only when
 * MMUCFG[LPIDSIZE] indicates category E.HV support.  Unlike
 * __setup_cpu_e5500 below, this path never modifies the cpu_spec
 * feature word.  LR is preserved in r4 across the helper calls.
 */
_GLOBAL(__restore_cpu_e5500)
	mflr	r4
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
1:
	mtlr	r4
	blr
199
/*
 * __setup_cpu_e5500 (64-bit): boot-time e5500 CPU setup.
 *
 * Enables both L1 caches and installs the base, performance-monitor
 * and doorbell IVORs.  If the core implements category E.HV
 * (MMUCFG[LPIDSIZE] != 0) the hypervisor IVORs are installed;
 * otherwise CPU_FTR_EMB_HV is cleared from the 64-bit cpu_spec
 * feature doubleword (r4 = cpu_spec pointer, per the
 * CPU_SPEC_FEATURES offset).  LR is preserved in r5.
 * Clobbers: r9, r10 (plus helper clobbers), cr0.
 */
_GLOBAL(__setup_cpu_e5500)
	mflr	r5
	bl	__e500_icache_setup
	bl	__e500_dcache_setup
	bl	__setup_base_ivors
	bl	setup_perfmon_ivor
	bl	setup_doorbell_ivors
	/*
	 * We only want to touch IVOR38-41 if we're running on hardware
	 * that supports category E.HV.  The architectural way to determine
	 * this is MMUCFG[LPIDSIZE].
	 */
	mfspr	r10,SPRN_MMUCFG
	rlwinm.	r10,r10,0,MMUCFG_LPIDSIZE
	beq	1f
	bl	setup_ehv_ivors
	b	2f
1:
	ld	r10,CPU_SPEC_FEATURES(r4)
	LOAD_REG_IMMEDIATE(r9,CPU_FTR_EMB_HV)
	andc	r10,r10,r9		/* clear CPU_FTR_EMB_HV */
	std	r10,CPU_SPEC_FEATURES(r4)
2:
	mtlr	r5
	blr
225#endif
226
/* flush L1 data cache, it can apply to e500v2, e500mc and e5500 */
/*
 * flush_dcache_L1: flush the entire L1 data cache by displacement.
 *
 * Reads the cache geometry from L1CFG0 (block size and size in KiB),
 * then walks a KERNELBASE-based region twice: first loading every
 * cache block (to displace all dirty lines), then dcbf-flushing the
 * same blocks.  Runs with external interrupts disabled (wrteei 0,
 * original MSR[EE] restored from r10 via wrtee) and with HID0[DCFA]
 * set for the duration, restored afterwards.
 * Clobbers: r3-r9, ctr, cr0.
 */
_GLOBAL(flush_dcache_L1)
	mfmsr	r10
	wrteei	0			/* disable external interrupts */

	mfspr	r3,SPRN_L1CFG0
	rlwinm	r5,r3,9,3	/* Extract cache block size */
	twlgti	r5,1		/* Only 32 and 64 byte cache blocks
				 * are currently defined.
				 */
	li	r4,32
	subfic	r6,r5,2		/* r6 = log2(1KiB / cache block size) -
				 * log2(number of ways)
				 */
	slw	r5,r4,r5	/* r5 = cache block size */

	rlwinm	r7,r3,0,0xff	/* Extract number of KiB in the cache */
	mulli	r7,r7,13	/* An 8-way cache will require 13
				 * loads per set.
				 */
	slw	r7,r7,r6	/* r7 = number of loads/flushes needed */

	/* save off HID0 and set DCFA */
	mfspr	r8,SPRN_HID0
	ori	r9,r8,HID0_DCFA@l
	mtspr	SPRN_HID0,r9
	isync

	/* r6 = region base, r7 = iteration count (reused for both passes) */
	LOAD_REG_IMMEDIATE(r6, KERNELBASE)
	mr	r4, r6
	mtctr	r7

1:	lwz	r3,0(r4)	/* Load... */
	add	r4,r4,r5
	bdnz	1b

	msync
	mr	r4, r6		/* second pass over the same region */
	mtctr	r7

1:	dcbf	0,r4		/* ...and flush. */
	add	r4,r4,r5
	bdnz	1b

	/* restore HID0 */
	mtspr	SPRN_HID0,r8
	isync

	wrtee	r10		/* restore original MSR[EE] */

	blr
278
/*
 * has_L2_cache: report whether this part has a backside L2 cache.
 *
 * Reads the SVR, masks off the E ("security enabled") bit so
 * P2040 and P2040E compare equal, and returns r3 = 0 for P2040/P2040E
 * (which have no L2 cache) and r3 = 1 for everything else.
 * Clobbers: r3, r4, cr0.
 */
SYM_FUNC_START_LOCAL(has_L2_cache)
	/* skip L2 cache on P2040/P2040E as they have no L2 cache */
	mfspr	r3, SPRN_SVR
	/* shift right by 8 bits and clear E bit of SVR */
	rlwinm	r4, r3, 24, ~0x800

	lis	r3, SVR_P2040@h
	ori	r3, r3, SVR_P2040@l
	cmpw	r4, r3
	beq	1f

	li	r3, 1			/* L2 cache present */
	blr
1:
	li	r3, 0			/* P2040/P2040E: no L2 cache */
	blr
SYM_FUNC_END(has_L2_cache)
296
/* flush backside L2 cache */
/*
 * flush_backside_L2_cache: flush the backside L2 cache if present.
 *
 * Uses has_L2_cache to skip parts without an L2 (P2040/P2040E).
 * Otherwise sets L2CSR0[L2FL] (fenced with msync/isync) and spins
 * until hardware clears the flush bit, indicating completion.
 * Clobbers: r3, r10, cr0.
 */
SYM_FUNC_START_LOCAL(flush_backside_L2_cache)
	mflr	r10			/* preserve LR around the call */
	bl	has_L2_cache
	mtlr	r10
	cmpwi	r3, 0
	beq	2f			/* no L2 cache: nothing to do */

	/* Flush the L2 cache */
	mfspr	r3, SPRN_L2CSR0
	ori	r3, r3, L2CSR0_L2FL@l
	msync
	isync
	mtspr	SPRN_L2CSR0,r3
	isync

	/* check if it is complete */
1:	mfspr	r3,SPRN_L2CSR0
	andi.	r3, r3, L2CSR0_L2FL@l
	bne	1b			/* wait for hardware to clear L2FL */
2:
	blr
SYM_FUNC_END(flush_backside_L2_cache)
320
/*
 * cpu_down_flush_e500v2: cache flush for taking an e500v2 core down.
 * e500v2 has no backside L2, so only the L1 data cache is flushed.
 * LR preserved in r0.
 */
_GLOBAL(cpu_down_flush_e500v2)
	mflr	r0
	bl	flush_dcache_L1
	mtlr	r0
	blr
326
/*
 * cpu_down_flush_e500mc / cpu_down_flush_e5500: cache flush for taking
 * an e500mc/e5500 core down.  Flushes the L1 data cache and then the
 * backside L2 (a no-op on parts without one).  LR preserved in r0.
 */
_GLOBAL(cpu_down_flush_e500mc)
_GLOBAL(cpu_down_flush_e5500)
	mflr	r0
	bl	flush_dcache_L1
	bl	flush_backside_L2_cache
	mtlr	r0
	blr
334
/* L1 Data Cache of e6500 contains no modified data, no flush is required */
_GLOBAL(cpu_down_flush_e6500)
	blr
338

/* source: linux/arch/powerpc/kernel/cpu_setup_e500.S */