1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ |
2 | /* |
3 | * PowerPC version |
4 | * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) |
5 | * |
6 | * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP |
7 | * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu> |
8 | * Adapted for Power Macintosh by Paul Mackerras. |
9 | * Low-level exception handlers and MMU support |
10 | * rewritten by Paul Mackerras. |
11 | * Copyright (C) 1996 Paul Mackerras. |
12 | * |
13 | * Adapted for 64bit PowerPC by Dave Engebretsen, Peter Bergner, and |
14 | * Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com |
15 | * |
16 | * This file contains the entry point for the 64-bit kernel along |
17 | * with some early initialization code common to all 64-bit powerpc |
18 | * variants. |
19 | */ |
20 | |
21 | #include <linux/linkage.h> |
22 | #include <linux/threads.h> |
23 | #include <linux/init.h> |
24 | #include <asm/reg.h> |
25 | #include <asm/page.h> |
26 | #include <asm/mmu.h> |
27 | #include <asm/ppc_asm.h> |
28 | #include <asm/head-64.h> |
29 | #include <asm/asm-offsets.h> |
30 | #include <asm/bug.h> |
31 | #include <asm/cputable.h> |
32 | #include <asm/setup.h> |
33 | #include <asm/hvcall.h> |
34 | #include <asm/thread_info.h> |
35 | #include <asm/firmware.h> |
36 | #include <asm/page_64.h> |
37 | #include <asm/irqflags.h> |
38 | #include <asm/kvm_book3s_asm.h> |
39 | #include <asm/ptrace.h> |
40 | #include <asm/hw_irq.h> |
41 | #include <asm/cputhreads.h> |
42 | #include <asm/ppc-opcode.h> |
43 | #include <asm/feature-fixups.h> |
44 | #ifdef CONFIG_PPC_BOOK3S |
45 | #include <asm/exception-64s.h> |
46 | #else |
47 | #include <asm/exception-64e.h> |
48 | #endif |
49 | |
50 | /* The physical memory is laid out such that the secondary processor |
51 | * spin code sits at 0x0000...0x00ff. On server, the vectors follow |
52 | * using the layout described in exceptions-64s.S |
53 | */ |
54 | |
55 | /* |
56 | * Entering into this code we make the following assumptions: |
57 | * |
58 | * For pSeries or server processors: |
59 | * 1. The MMU is off & open firmware is running in real mode. |
60 | * 2. The primary CPU enters at __start. |
 * 3. If the RTAS supports "query-cpu-stopped-state", then secondary
 *    CPUs will enter as directed by the "start-cpu" RTAS call, which is
 *    generic_secondary_smp_init, with PIR in r3.
 * 4. Else the secondary CPUs will enter at secondary_hold (0x60) as
 *    directed by the "start-cpu" RTAS call, with PIR in r3.
66 | * -or- For OPAL entry: |
67 | * 1. The MMU is off, processor in HV mode. |
68 | * 2. The primary CPU enters at 0 with device-tree in r3, OPAL base |
69 | * in r8, and entry in r9 for debugging purposes. |
70 | * 3. Secondary CPUs enter as directed by OPAL_START_CPU call, which |
71 | * is at generic_secondary_smp_init, with PIR in r3. |
72 | * |
73 | * For Book3E processors: |
 * 1. The MMU is on, running in AS0 in a state defined in ePAPR
75 | * 2. The kernel is entered at __start |
76 | */ |
77 | |
78 | /* |
 * boot_from_prom and prom_init run at the physical address. Everything
 * after prom and the kexec entry runs at the virtual address (PAGE_OFFSET).
81 | * Secondaries run at the virtual address from generic_secondary_common_init |
82 | * onward. |
83 | */ |
84 | |
85 | OPEN_FIXED_SECTION(first_256B, 0x0, 0x100) |
86 | USE_FIXED_SECTION(first_256B) |
87 | /* |
 * Offsets are relative to the start of the fixed section, and
 * first_256B starts at 0. Offsets are a bit easier to use here
 * than the fixed section entry macros.
91 | */ |
92 | . = 0x0 |
93 | _GLOBAL(__start) |
94 | /* NOP this out unconditionally */ |
95 | BEGIN_FTR_SECTION |
96 | FIXUP_ENDIAN |
97 | b __start_initialization_multiplatform |
98 | END_FTR_SECTION(0, 1) |
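	/*
	 * END_FTR_SECTION(0, 1) never matches any CPU feature, so the
	 * feature fixup pass replaces the instructions above with nops
	 * once the kernel is up; a later stray branch to address 0 then
	 * falls through to the trap below.
	 */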
99 | |
100 | /* Catch branch to 0 in real mode */ |
101 | trap |
102 | |
103 | /* Secondary processors spin on this value until it becomes non-zero. |
104 | * When non-zero, it contains the real address of the function the cpu |
105 | * should jump to. |
106 | */ |
107 | .balign 8 |
108 | .globl __secondary_hold_spinloop |
109 | __secondary_hold_spinloop: |
110 | .8byte 0x0 |
111 | |
112 | /* Secondary processors write this value with their cpu # */ |
113 | /* after they enter the spin loop immediately below. */ |
114 | .globl __secondary_hold_acknowledge |
115 | __secondary_hold_acknowledge: |
116 | .8byte 0x0 |
117 | |
118 | #ifdef CONFIG_RELOCATABLE |
119 | /* This flag is set to 1 by a loader if the kernel should run |
120 | * at the loaded address instead of the linked address. This |
121 | * is used by kexec-tools to keep the kdump kernel in the |
122 | * crash_kernel region. The loader is responsible for |
123 | * observing the alignment requirement. |
124 | */ |
125 | |
126 | #ifdef CONFIG_RELOCATABLE_TEST |
127 | #define RUN_AT_LOAD_DEFAULT 1 /* Test relocation, do not copy to 0 */ |
128 | #else |
129 | #define RUN_AT_LOAD_DEFAULT 0x72756e30 /* "run0" -- relocate to 0 by default */ |
130 | #endif |
131 | |
132 | /* Do not move this variable as kexec-tools knows about it. */ |
133 | . = 0x5c |
134 | .globl __run_at_load |
135 | __run_at_load: |
136 | DEFINE_FIXED_SYMBOL(__run_at_load, first_256B) |
137 | .long RUN_AT_LOAD_DEFAULT |
138 | #endif |
139 | |
140 | . = 0x60 |
141 | /* |
142 | * The following code is used to hold secondary processors |
143 | * in a spin loop after they have entered the kernel, but |
144 | * before the bulk of the kernel has been relocated. This code |
145 | * is relocated to physical address 0x60 before prom_init is run. |
146 | * All of it must fit below the first exception vector at 0x100. |
147 | * Use .globl here not _GLOBAL because we want __secondary_hold |
148 | * to be the actual text address, not a descriptor. |
149 | */ |
150 | .globl __secondary_hold |
151 | __secondary_hold: |
152 | FIXUP_ENDIAN |
153 | #ifndef CONFIG_PPC_BOOK3E_64 |
154 | mfmsr r24 |
155 | ori r24,r24,MSR_RI |
156 | mtmsrd r24 /* RI on */ |
157 | #endif |
158 | /* Grab our physical cpu number */ |
159 | mr r24,r3 |
160 | /* stash r4 for book3e */ |
161 | mr r25,r4 |
162 | |
163 | /* Tell the master cpu we're here */ |
164 | /* Relocation is off & we are located at an address less */ |
165 | /* than 0x100, so only need to grab low order offset. */ |
166 | std r24,(ABS_ADDR(__secondary_hold_acknowledge, first_256B))(0) |
167 | sync |
168 | |
169 | /* All secondary cpus wait here until told to start. */ |
170 | 100: ld r12,(ABS_ADDR(__secondary_hold_spinloop, first_256B))(0) |
171 | cmpdi 0,r12,0 |
172 | beq 100b |
173 | |
174 | #if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE) |
175 | #ifdef CONFIG_PPC_BOOK3E_64 |
176 | tovirt(r12,r12) |
177 | #endif |
178 | mtctr r12 |
179 | mr r3,r24 |
180 | /* |
181 | * it may be the case that other platforms have r4 right to |
182 | * begin with, this gives us some safety in case it is not |
183 | */ |
184 | #ifdef CONFIG_PPC_BOOK3E_64 |
185 | mr r4,r25 |
186 | #else |
187 | li r4,0 |
188 | #endif |
189 | /* Make sure that patched code is visible */ |
190 | isync |
191 | bctr |
192 | #else |
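	/*
	 * Without SMP or KEXEC_CORE there is nowhere to send a released
	 * secondary, so trap and report a bug.
	 */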
193 | 0: trap |
194 | EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 |
195 | #endif |
196 | CLOSE_FIXED_SECTION(first_256B) |
197 | |
198 | /* |
 * On server, we include the exception vector code here as it
 * relies on absolute addressing, which is only possible within
 * this compilation unit.
202 | */ |
203 | #ifdef CONFIG_PPC_BOOK3S |
204 | #include "exceptions-64s.S" |
205 | #else |
206 | OPEN_TEXT_SECTION(0x100) |
207 | #endif |
208 | |
209 | USE_TEXT_SECTION() |
210 | |
211 | #include "interrupt_64.S" |
212 | |
213 | #ifdef CONFIG_PPC_BOOK3E_64 |
214 | /* |
 * booting_thread_hwid holds the thread id we want to boot in the cpu
 * hotplug case. It is set by the cpu hotplug code and is invalid by
 * default. The thread id is the same as the initial value of the
 * SPRN_PIR[THREAD_ID] bit field.
219 | */ |
220 | .globl booting_thread_hwid |
221 | booting_thread_hwid: |
222 | .long INVALID_THREAD_HWID |
223 | .align 3 |
224 | /* |
225 | * start a thread in the same core |
226 | * input parameters: |
227 | * r3 = the thread physical id |
228 | * r4 = the entry point where thread starts |
229 | */ |
230 | _GLOBAL(book3e_start_thread) |
231 | LOAD_REG_IMMEDIATE(r5, MSR_KERNEL) |
232 | cmpwi r3, 0 |
233 | beq 10f |
234 | cmpwi r3, 1 |
235 | beq 11f |
236 | /* If the thread id is invalid, just exit. */ |
237 | b 13f |
238 | 10: |
239 | MTTMR(TMRN_IMSR0, 5) |
240 | MTTMR(TMRN_INIA0, 4) |
241 | b 12f |
242 | 11: |
243 | MTTMR(TMRN_IMSR1, 5) |
244 | MTTMR(TMRN_INIA1, 4) |
245 | 12: |
246 | isync |
	li	r6, 1
	sld	r6, r6, r3		/* r6 = 1 << thread id */
	mtspr	SPRN_TENS, r6		/* Thread Enable Set: start the thread */
250 | 13: |
251 | blr |
252 | |
253 | /* |
254 | * stop a thread in the same core |
255 | * input parameter: |
256 | * r3 = the thread physical id |
257 | */ |
258 | _GLOBAL(book3e_stop_thread) |
259 | cmpwi r3, 0 |
260 | beq 10f |
261 | cmpwi r3, 1 |
262 | beq 10f |
263 | /* If the thread id is invalid, just exit. */ |
264 | b 13f |
265 | 10: |
	li	r4, 1
	sld	r4, r4, r3		/* r4 = 1 << thread id */
	mtspr	SPRN_TENC, r4		/* Thread Enable Clear: stop the thread */
269 | 13: |
270 | blr |
271 | |
272 | _GLOBAL(fsl_secondary_thread_init) |
273 | mfspr r4,SPRN_BUCSR |
274 | |
275 | /* Enable branch prediction */ |
276 | lis r3,BUCSR_INIT@h |
277 | ori r3,r3,BUCSR_INIT@l |
278 | mtspr SPRN_BUCSR,r3 |
279 | isync |
280 | |
281 | /* |
282 | * Fix PIR to match the linear numbering in the device tree. |
283 | * |
284 | * On e6500, the reset value of PIR uses the low three bits for |
285 | * the thread within a core, and the upper bits for the core |
286 | * number. There are two threads per core, so shift everything |
287 | * but the low bit right by two bits so that the cpu numbering is |
288 | * continuous. |
289 | * |
290 | * If the old value of BUCSR is non-zero, this thread has run |
291 | * before. Thus, we assume we are coming from kexec or a similar |
292 | * scenario, and PIR is already set to the correct value. This |
293 | * is a bit of a hack, but there are limited opportunities for |
294 | * getting information into the thread and the alternatives |
295 | * seemed like they'd be overkill. We can't tell just by looking |
296 | * at the old PIR value which state it's in, since the same value |
297 | * could be valid for one thread out of reset and for a different |
298 | * thread in Linux. |
299 | */ |
300 | |
301 | mfspr r3, SPRN_PIR |
302 | cmpwi r4,0 |
303 | bne 1f |
304 | rlwimi r3, r3, 30, 2, 30 |
305 | mtspr SPRN_PIR, r3 |
306 | 1: |
307 | mr r24,r3 |
308 | |
309 | /* turn on 64-bit mode */ |
310 | bl enable_64b_mode |
311 | |
312 | /* Book3E initialization */ |
313 | mr r3,r24 |
314 | bl book3e_secondary_thread_init |
315 | bl relative_toc |
316 | |
317 | b generic_secondary_common_init |
318 | |
319 | #endif /* CONFIG_PPC_BOOK3E_64 */ |
320 | |
321 | /* |
322 | * On pSeries and most other platforms, secondary processors spin |
323 | * in the following code. |
324 | * At entry, r3 = this processor's number (physical cpu id) |
325 | * |
326 | * On Book3E, r4 = 1 to indicate that the initial TLB entry for |
327 | * this core already exists (setup via some other mechanism such |
328 | * as SCOM before entry). |
329 | */ |
330 | _GLOBAL(generic_secondary_smp_init) |
331 | FIXUP_ENDIAN |
332 | |
333 | li r13,0 |
334 | |
335 | /* Poison TOC */ |
336 | li r2,-1 |
337 | |
338 | mr r24,r3 |
339 | mr r25,r4 |
340 | |
341 | /* turn on 64-bit mode */ |
342 | bl enable_64b_mode |
343 | |
344 | #ifdef CONFIG_PPC_BOOK3E_64 |
345 | /* Book3E initialization */ |
346 | mr r3,r24 |
347 | mr r4,r25 |
348 | bl book3e_secondary_core_init |
349 | /* Now NIA and r2 are relocated to PAGE_OFFSET if not already */ |
350 | /* |
351 | * After common core init has finished, check if the current thread is the |
352 | * one we wanted to boot. If not, start the specified thread and stop the |
353 | * current thread. |
354 | */ |
355 | LOAD_REG_ADDR(r4, booting_thread_hwid) |
356 | lwz r3, 0(r4) |
357 | li r5, INVALID_THREAD_HWID |
358 | cmpw r3, r5 |
359 | beq 20f |
360 | |
361 | /* |
362 | * The value of booting_thread_hwid has been stored in r3, |
363 | * so make it invalid. |
364 | */ |
365 | stw r5, 0(r4) |
366 | |
367 | /* |
368 | * Get the current thread id and check if it is the one we wanted. |
369 | * If not, start the one specified in booting_thread_hwid and stop |
370 | * the current thread. |
371 | */ |
372 | mfspr r8, SPRN_TIR |
373 | cmpw r3, r8 |
374 | beq 20f |
375 | |
376 | /* start the specified thread */ |
377 | LOAD_REG_ADDR(r5, DOTSYM(fsl_secondary_thread_init)) |
378 | bl book3e_start_thread |
379 | |
380 | /* stop the current thread */ |
381 | mr r3, r8 |
382 | bl book3e_stop_thread |
383 | 10: |
384 | b 10b |
385 | 20: |
386 | #else |
387 | /* Now the MMU is off, can branch to our PAGE_OFFSET address */ |
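	/*
	 * bcl 20,31,$+4 puts the address of the next instruction in LR
	 * without consuming a link-stack entry; from that we compute the
	 * PAGE_OFFSET address of label 2 and branch to it.
	 */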
388 | bcl 20,31,$+4 |
389 | 1: mflr r11 |
390 | addi r11,r11,(2f - 1b) |
391 | tovirt(r11, r11) |
392 | mtctr r11 |
393 | bctr |
394 | 2: |
395 | bl relative_toc |
396 | #endif |
397 | |
398 | generic_secondary_common_init: |
399 | /* Set up a paca value for this processor. Since we have the |
400 | * physical cpu id in r24, we need to search the pacas to find |
401 | * which logical id maps to our physical one. |
402 | */ |
403 | #ifndef CONFIG_SMP |
404 | b kexec_wait /* wait for next kernel if !SMP */ |
405 | #else |
	LOAD_REG_ADDR(r8, paca_ptrs)	/* Load paca_ptrs pointer */
407 | ld r8,0(r8) /* Get base vaddr of array */ |
408 | #if (NR_CPUS == 1) || defined(CONFIG_FORCE_NR_CPUS) |
409 | LOAD_REG_IMMEDIATE(r7, NR_CPUS) |
410 | #else |
411 | LOAD_REG_ADDR(r7, nr_cpu_ids) /* Load nr_cpu_ids address */ |
412 | lwz r7,0(r7) /* also the max paca allocated */ |
413 | #endif |
414 | li r5,0 /* logical cpu id */ |
415 | 1: |
416 | sldi r9,r5,3 /* get paca_ptrs[] index from cpu id */ |
417 | ldx r13,r9,r8 /* r13 = paca_ptrs[cpu id] */ |
418 | lhz r6,PACAHWCPUID(r13) /* Load HW procid from paca */ |
419 | cmpw r6,r24 /* Compare to our id */ |
420 | beq 2f |
421 | addi r5,r5,1 |
422 | cmpw r5,r7 /* Check if more pacas exist */ |
423 | blt 1b |
424 | |
425 | mr r3,r24 /* not found, copy phys to r3 */ |
426 | b kexec_wait /* next kernel might do better */ |
427 | |
428 | 2: SET_PACA(r13) |
429 | #ifdef CONFIG_PPC_BOOK3E_64 |
430 | addi r12,r13,PACA_EXTLB /* and TLB exc frame in another */ |
431 | mtspr SPRN_SPRG_TLB_EXFRAME,r12 |
432 | #endif |
433 | |
434 | /* From now on, r24 is expected to be logical cpuid */ |
435 | mr r24,r5 |
436 | |
437 | /* Create a temp kernel stack for use before relocation is on. */ |
438 | ld r1,PACAEMERGSP(r13) |
439 | subi r1,r1,STACK_FRAME_MIN_SIZE |
440 | |
441 | /* See if we need to call a cpu state restore handler */ |
442 | LOAD_REG_ADDR(r23, cur_cpu_spec) |
443 | ld r23,0(r23) |
444 | ld r12,CPU_SPEC_RESTORE(r23) |
445 | cmpdi 0,r12,0 |
446 | beq 3f |
447 | #ifdef CONFIG_PPC64_ELF_ABI_V1 |
448 | ld r12,0(r12) |
449 | #endif |
450 | mtctr r12 |
451 | bctrl |
452 | |
453 | 3: LOAD_REG_ADDR(r3, spinning_secondaries) /* Decrement spinning_secondaries */ |
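	/* Atomic decrement using lwarx/stwcx.; retry if we lose the reservation. */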
454 | lwarx r4,0,r3 |
455 | subi r4,r4,1 |
456 | stwcx. r4,0,r3 |
457 | bne 3b |
458 | isync |
459 | |
460 | 4: HMT_LOW |
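	/* Poll at low SMT priority so we don't steal cycles from sibling threads. */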
461 | lbz r23,PACAPROCSTART(r13) /* Test if this processor should */ |
462 | /* start. */ |
463 | cmpwi 0,r23,0 |
464 | beq 4b /* Loop until told to go */ |
465 | |
466 | sync /* order paca.run and cur_cpu_spec */ |
467 | isync /* In case code patching happened */ |
468 | |
469 | b __secondary_start |
470 | #endif /* SMP */ |
471 | |
472 | /* |
473 | * Turn the MMU off. |
474 | * Assumes we're mapped EA == RA if the MMU is on. |
475 | */ |
476 | #ifdef CONFIG_PPC_BOOK3S |
477 | SYM_FUNC_START_LOCAL(__mmu_off) |
478 | mfmsr r3 |
479 | andi. r0,r3,MSR_IR|MSR_DR |
480 | beqlr |
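	/* Otherwise return via rfid: SRR0 = caller's LR, SRR1 = MSR with IR/DR cleared. */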
481 | mflr r4 |
482 | andc r3,r3,r0 |
483 | mtspr SPRN_SRR0,r4 |
484 | mtspr SPRN_SRR1,r3 |
485 | sync |
486 | rfid |
487 | b . /* prevent speculative execution */ |
488 | SYM_FUNC_END(__mmu_off) |
489 | |
490 | SYM_FUNC_START_LOCAL(start_initialization_book3s) |
491 | mflr r25 |
492 | |
493 | /* Setup some critical 970 SPRs before switching MMU off */ |
494 | mfspr r0,SPRN_PVR |
495 | srwi r0,r0,16 |
496 | cmpwi r0,0x39 /* 970 */ |
497 | beq 1f |
498 | cmpwi r0,0x3c /* 970FX */ |
499 | beq 1f |
500 | cmpwi r0,0x44 /* 970MP */ |
501 | beq 1f |
502 | cmpwi r0,0x45 /* 970GX */ |
503 | bne 2f |
504 | 1: bl __cpu_preinit_ppc970 |
505 | 2: |
506 | |
507 | /* Switch off MMU if not already off */ |
508 | bl __mmu_off |
509 | |
510 | /* Now the MMU is off, can return to our PAGE_OFFSET address */ |
511 | tovirt(r25,r25) |
512 | mtlr r25 |
513 | blr |
514 | SYM_FUNC_END(start_initialization_book3s) |
515 | #endif |
516 | |
517 | /* |
 * Here is our main kernel entry point. We currently support two kinds of
 * entry, depending on the value of r5.
520 | * |
521 | * r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content |
522 | * in r3...r7 |
523 | * |
524 | * r5 == NULL -> kexec style entry. r3 is a physical pointer to the |
525 | * DT block, r4 is a physical pointer to the kernel itself |
526 | * |
527 | */ |
528 | __start_initialization_multiplatform: |
	/* Make sure we are running in 64-bit mode */
530 | bl enable_64b_mode |
531 | |
532 | /* Zero r13 (paca) so early program check / mce don't use it */ |
533 | li r13,0 |
534 | |
535 | /* Poison TOC */ |
536 | li r2,-1 |
537 | |
538 | /* |
	 * Are we booted from a PROM OF-type client interface?
540 | */ |
541 | cmpldi cr0,r5,0 |
542 | beq 1f |
543 | b __boot_from_prom /* yes -> prom */ |
544 | 1: |
545 | /* Save parameters */ |
546 | mr r31,r3 |
547 | mr r30,r4 |
548 | #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL |
549 | /* Save OPAL entry */ |
550 | mr r28,r8 |
551 | mr r29,r9 |
552 | #endif |
553 | |
554 | /* Get TOC pointer (current runtime address) */ |
555 | bl relative_toc |
556 | |
557 | /* These functions return to the virtual (PAGE_OFFSET) address */ |
558 | #ifdef CONFIG_PPC_BOOK3E_64 |
559 | bl start_initialization_book3e |
560 | #else |
561 | bl start_initialization_book3s |
562 | #endif /* CONFIG_PPC_BOOK3E_64 */ |
563 | |
564 | /* Get TOC pointer, virtual */ |
565 | bl relative_toc |
566 | |
567 | /* find out where we are now */ |
568 | |
	/* OPAL doesn't pass the base address in r4, so we have to derive it. */
570 | bcl 20,31,$+4 |
571 | 0: mflr r26 /* r26 = runtime addr here */ |
572 | addis r26,r26,(_stext - 0b)@ha |
573 | addi r26,r26,(_stext - 0b)@l /* current runtime base addr */ |
574 | |
575 | b __after_prom_start |
576 | |
577 | __REF |
578 | __boot_from_prom: |
579 | #ifdef CONFIG_PPC_OF_BOOT_TRAMPOLINE |
580 | /* Get TOC pointer, non-virtual */ |
581 | bl relative_toc |
582 | |
583 | /* find out where we are now */ |
584 | bcl 20,31,$+4 |
585 | 0: mflr r26 /* r26 = runtime addr here */ |
586 | addis r26,r26,(_stext - 0b)@ha |
587 | addi r26,r26,(_stext - 0b)@l /* current runtime base addr */ |
588 | |
589 | /* Save parameters */ |
590 | mr r31,r3 |
591 | mr r30,r4 |
592 | mr r29,r5 |
593 | mr r28,r6 |
594 | mr r27,r7 |
595 | |
596 | /* |
	 * Align the stack to a 16-byte boundary.
	 * Depending on the size and layout of the ELF sections in the initial
	 * boot binary, the stack pointer may be unaligned on PowerMac.
600 | */ |
	rldicr	r1,r1,0,59		/* clear the low 4 bits of r1 */
602 | |
603 | #ifdef CONFIG_RELOCATABLE |
604 | /* Relocate code for where we are now */ |
605 | mr r3,r26 |
606 | bl relocate |
607 | #endif |
608 | |
609 | /* Restore parameters */ |
610 | mr r3,r31 |
611 | mr r4,r30 |
612 | mr r5,r29 |
613 | mr r6,r28 |
614 | mr r7,r27 |
615 | |
616 | /* Do all of the interaction with OF client interface */ |
617 | mr r8,r26 |
618 | bl CFUNC(prom_init) |
#endif /* CONFIG_PPC_OF_BOOT_TRAMPOLINE */
620 | |
621 | /* We never return. We also hit that trap if trying to boot |
622 | * from OF while CONFIG_PPC_OF_BOOT_TRAMPOLINE isn't selected */ |
623 | trap |
624 | .previous |
625 | |
626 | __after_prom_start: |
627 | #ifdef CONFIG_RELOCATABLE |
628 | /* process relocations for the final address of the kernel */ |
629 | lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) |
630 | cmplwi cr0,r7,1 /* flagged to stay where we are ? */ |
631 | mr r25,r26 /* then use current kernel base */ |
632 | beq 1f |
633 | LOAD_REG_IMMEDIATE(r25, PAGE_OFFSET) /* else use static kernel base */ |
634 | 1: mr r3,r25 |
635 | bl relocate |
636 | #if defined(CONFIG_PPC_BOOK3E_64) |
637 | /* IVPR needs to be set after relocation. */ |
638 | bl init_core_book3e |
639 | #endif |
640 | #endif |
641 | |
642 | /* |
643 | * We need to run with _stext at physical address PHYSICAL_START. |
644 | * This will leave some code in the first 256B of |
645 | * real memory, which are reserved for software use. |
646 | * |
647 | * Note: This process overwrites the OF exception vectors. |
648 | */ |
649 | LOAD_REG_IMMEDIATE(r3, PAGE_OFFSET) |
650 | mr. r4,r26 /* In some cases the loader may */ |
651 | beq 9f /* have already put us at zero */ |
652 | li r6,0x100 /* Start offset, the first 0x100 */ |
653 | /* bytes were copied earlier. */ |
654 | |
655 | #ifdef CONFIG_RELOCATABLE |
656 | /* |
 * Check whether the kernel has to run as a relocatable kernel, based on the
 * variable __run_at_load. If it is set, the kernel is treated as relocatable;
 * otherwise it is moved to PHYSICAL_START.
660 | */ |
661 | lwz r7,(FIXED_SYMBOL_ABS_ADDR(__run_at_load))(r26) |
662 | cmplwi cr0,r7,1 |
663 | bne 3f |
664 | |
665 | #ifdef CONFIG_PPC_BOOK3E_64 |
666 | LOAD_REG_ADDR(r5, __end_interrupts) |
667 | LOAD_REG_ADDR(r11, _stext) |
668 | sub r5,r5,r11 |
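	/* r5 = __end_interrupts - _stext: copy only the interrupt vectors for now */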
669 | #else |
670 | /* just copy interrupts */ |
671 | LOAD_REG_IMMEDIATE_SYM(r5, r11, FIXED_SYMBOL_ABS_ADDR(__end_interrupts)) |
672 | #endif |
673 | b 5f |
674 | 3: |
675 | #endif |
676 | /* # bytes of memory to copy */ |
677 | lis r5,(ABS_ADDR(copy_to_here, text))@ha |
678 | addi r5,r5,(ABS_ADDR(copy_to_here, text))@l |
679 | |
680 | bl copy_and_flush /* copy the first n bytes */ |
681 | /* this includes the code being */ |
682 | /* executed here. */ |
683 | /* Jump to the copy of this code that we just made */ |
684 | addis r8,r3,(ABS_ADDR(4f, text))@ha |
685 | addi r12,r8,(ABS_ADDR(4f, text))@l |
686 | mtctr r12 |
687 | bctr |
688 | |
689 | .balign 8 |
690 | p_end: .8byte _end - copy_to_here |
691 | |
692 | 4: |
693 | /* |
694 | * Now copy the rest of the kernel up to _end, add |
695 | * _end - copy_to_here to the copy limit and run again. |
696 | */ |
697 | addis r8,r26,(ABS_ADDR(p_end, text))@ha |
698 | ld r8,(ABS_ADDR(p_end, text))@l(r8) |
699 | add r5,r5,r8 |
700 | 5: bl copy_and_flush /* copy the rest */ |
701 | |
702 | 9: b start_here_multiplatform |
703 | |
704 | /* |
705 | * Copy routine used to copy the kernel to start at physical address 0 |
706 | * and flush and invalidate the caches as needed. |
707 | * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset |
708 | * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5. |
709 | * |
710 | * Note: this routine *only* clobbers r0, r6 and lr |
711 | */ |
712 | _GLOBAL(copy_and_flush) |
	addi	r5,r5,-8	/* pre-bias: the copy loop below pre-increments r6 */
	addi	r6,r6,-8
715 | 4: li r0,8 /* Use the smallest common */ |
716 | /* denominator cache line */ |
717 | /* size. This results in */ |
718 | /* extra cache line flushes */ |
719 | /* but operation is correct. */ |
720 | /* Can't get cache line size */ |
721 | /* from NACA as it is being */ |
722 | /* moved too. */ |
723 | |
724 | mtctr r0 /* put # words/line in ctr */ |
725 | 3: addi r6,r6,8 /* copy a cache line */ |
726 | ldx r0,r6,r4 |
727 | stdx r0,r6,r3 |
728 | bdnz 3b |
729 | dcbst r6,r3 /* write it to memory */ |
730 | sync |
731 | icbi r6,r3 /* flush the icache line */ |
732 | cmpld 0,r6,r5 |
733 | blt 4b |
734 | sync |
735 | addi r5,r5,8 |
736 | addi r6,r6,8 |
737 | isync |
738 | blr |
739 | |
740 | _ASM_NOKPROBE_SYMBOL(copy_and_flush); /* Called in real mode */ |
741 | |
742 | .align 8 |
743 | copy_to_here: |
744 | |
745 | #ifdef CONFIG_SMP |
746 | #ifdef CONFIG_PPC_PMAC |
747 | /* |
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
750 | */ |
751 | .section ".text" ; |
752 | .align 2 ; |
753 | |
754 | .globl __secondary_start_pmac_0 |
755 | __secondary_start_pmac_0: |
756 | /* NB the entries for cpus 0, 1, 2 must each occupy 8 bytes. */ |
757 | li r24,0 |
758 | b 1f |
759 | li r24,1 |
760 | b 1f |
761 | li r24,2 |
762 | b 1f |
763 | li r24,3 |
764 | 1: |
765 | |
766 | _GLOBAL(pmac_secondary_start) |
767 | /* turn on 64-bit mode */ |
768 | bl enable_64b_mode |
769 | |
770 | li r0,0 |
771 | mfspr r3,SPRN_HID4 |
772 | rldimi r3,r0,40,23 /* clear bit 23 (rm_ci) */ |
773 | sync |
774 | mtspr SPRN_HID4,r3 |
775 | isync |
776 | sync |
777 | slbia |
778 | |
779 | /* Branch to our PAGE_OFFSET address */ |
780 | bcl 20,31,$+4 |
781 | 1: mflr r11 |
782 | addi r11,r11,(2f - 1b) |
783 | tovirt(r11, r11) |
784 | mtctr r11 |
785 | bctr |
786 | 2: |
787 | bl relative_toc |
788 | |
789 | /* Copy some CPU settings from CPU 0 */ |
790 | bl __restore_cpu_ppc970 |
791 | |
	/* pSeries does this early, though I don't think we really need it */
793 | mfmsr r3 |
794 | ori r3,r3,MSR_RI |
795 | mtmsrd r3 /* RI on */ |
796 | |
797 | /* Set up a paca value for this processor. */ |
798 | LOAD_REG_ADDR(r4,paca_ptrs) /* Load paca pointer */ |
799 | ld r4,0(r4) /* Get base vaddr of paca_ptrs array */ |
800 | sldi r5,r24,3 /* get paca_ptrs[] index from cpu id */ |
801 | ldx r13,r5,r4 /* r13 = paca_ptrs[cpu id] */ |
802 | SET_PACA(r13) /* Save vaddr of paca in an SPRG*/ |
803 | |
804 | /* Mark interrupts soft and hard disabled (they might be enabled |
805 | * in the PACA when doing hotplug) |
806 | */ |
807 | li r0,IRQS_DISABLED |
808 | stb r0,PACAIRQSOFTMASK(r13) |
809 | li r0,PACA_IRQ_HARD_DIS |
810 | stb r0,PACAIRQHAPPENED(r13) |
811 | |
812 | /* Create a temp kernel stack for use before relocation is on. */ |
813 | ld r1,PACAEMERGSP(r13) |
814 | subi r1,r1,STACK_FRAME_MIN_SIZE |
815 | |
816 | b __secondary_start |
817 | |
818 | #endif /* CONFIG_PPC_PMAC */ |
819 | |
820 | /* |
 * This function is called after the master CPU has released the
 * secondary processors. Execution here starts with relocation off.
823 | * The paca for this processor has the following fields initialized at |
824 | * this point: |
825 | * 1. Processor number |
826 | * 2. Segment table pointer (virtual address) |
827 | * On entry the following are set: |
828 | * r1 = stack pointer (real addr of temp stack) |
829 | * r24 = cpu# (in Linux terms) |
830 | * r13 = paca virtual address |
831 | * SPRG_PACA = paca virtual address |
832 | */ |
833 | .section ".text" ; |
834 | .align 2 ; |
835 | |
836 | .globl __secondary_start |
837 | __secondary_start: |
838 | /* Set thread priority to MEDIUM */ |
839 | HMT_MEDIUM |
840 | |
841 | /* |
842 | * Do early setup for this CPU, in particular initialising the MMU so we |
843 | * can turn it on below. This is a call to C, which is OK, we're still |
844 | * running on the emergency stack. |
845 | */ |
846 | bl CFUNC(early_setup_secondary) |
847 | |
848 | /* |
849 | * The primary has initialized our kernel stack for us in the paca, grab |
850 | * it and put it in r1. We must *not* use it until we turn on the MMU |
851 | * below, because it may not be inside the RMO. |
852 | */ |
853 | ld r1, PACAKSAVE(r13) |
854 | |
855 | /* Clear backchain so we get nice backtraces */ |
856 | li r7,0 |
857 | mtlr r7 |
858 | |
859 | /* Mark interrupts soft and hard disabled (they might be enabled |
860 | * in the PACA when doing hotplug) |
861 | */ |
862 | li r7,IRQS_DISABLED |
863 | stb r7,PACAIRQSOFTMASK(r13) |
864 | li r0,PACA_IRQ_HARD_DIS |
865 | stb r0,PACAIRQHAPPENED(r13) |
866 | |
867 | /* enable MMU and jump to start_secondary */ |
868 | LOAD_REG_ADDR(r3, start_secondary_prolog) |
869 | LOAD_REG_IMMEDIATE(r4, MSR_KERNEL) |
870 | |
871 | mtspr SPRN_SRR0,r3 |
872 | mtspr SPRN_SRR1,r4 |
873 | RFI_TO_KERNEL |
874 | b . /* prevent speculative execution */ |
875 | |
876 | /* |
877 | * Running with relocation on at this point. All we want to do is |
878 | * zero the stack back-chain pointer and get the TOC virtual address |
879 | * before going into C code. |
880 | */ |
881 | start_secondary_prolog: |
882 | LOAD_PACA_TOC() |
883 | li r3,0 |
884 | std r3,0(r1) /* Zero the stack frame pointer */ |
885 | bl CFUNC(start_secondary) |
886 | b . |
887 | /* |
888 | * Reset stack pointer and call start_secondary |
889 | * to continue with online operation when woken up |
890 | * from cede in cpu offline. |
891 | */ |
892 | _GLOBAL(start_secondary_resume) |
893 | ld r1,PACAKSAVE(r13) /* Reload kernel stack pointer */ |
894 | li r3,0 |
895 | std r3,0(r1) /* Zero the stack frame pointer */ |
896 | bl CFUNC(start_secondary) |
897 | b . |
898 | #endif |
899 | |
900 | /* |
901 | * This subroutine clobbers r11 and r12 |
902 | */ |
903 | SYM_FUNC_START_LOCAL(enable_64b_mode) |
904 | mfmsr r11 /* grab the current MSR */ |
905 | #ifdef CONFIG_PPC_BOOK3E_64 |
906 | oris r11,r11,0x8000 /* CM bit set, we'll set ICM later */ |
907 | mtmsr r11 |
908 | #else /* CONFIG_PPC_BOOK3E_64 */ |
909 | LOAD_REG_IMMEDIATE(r12, MSR_64BIT) |
910 | or r11,r11,r12 |
911 | mtmsrd r11 |
912 | isync |
913 | #endif |
914 | blr |
915 | SYM_FUNC_END(enable_64b_mode) |
916 | |
917 | /* |
918 | * This puts the TOC pointer into r2, offset by 0x8000 (as expected |
919 | * by the toolchain). It computes the correct value for wherever we |
920 | * are running at the moment, using position-independent code. |
921 | * |
922 | * Note: The compiler constructs pointers using offsets from the |
923 | * TOC in -mcmodel=medium mode. After we relocate to 0 but before |
924 | * the MMU is on we need our TOC to be a virtual address otherwise |
925 | * these pointers will be real addresses which may get stored and |
926 | * accessed later with the MMU on. We branch to the virtual address |
927 | * while still in real mode then call relative_toc again to handle |
928 | * this. |
929 | */ |
930 | _GLOBAL(relative_toc) |
931 | #ifdef CONFIG_PPC_KERNEL_PCREL |
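	/* With pcrel addressing there is no TOC; r2 should still hold the -1 poison value, trap if not. */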
932 | tdnei r2,-1 |
933 | blr |
934 | #else |
935 | mflr r0 |
936 | bcl 20,31,$+4 |
937 | 0: mflr r11 |
938 | ld r2,(p_toc - 0b)(r11) |
939 | add r2,r2,r11 |
940 | mtlr r0 |
941 | blr |
942 | |
943 | .balign 8 |
p_toc:	.8byte	.TOC. - 0b	/* link-time offset from 0b to the TOC base */
945 | #endif |
946 | |
947 | /* |
948 | * This is where the main kernel code starts. |
949 | */ |
950 | __REF |
951 | start_here_multiplatform: |
952 | /* Adjust TOC for moved kernel. Could adjust when moving it instead. */ |
953 | bl relative_toc |
954 | |
	/* Clear out the BSS. It may already have been done in prom_init,
	 * but that's irrelevant since prom_init will soon be detached from
	 * the kernel completely. Besides, we need to clear it now for
	 * kexec-style entry.
959 | */ |
960 | LOAD_REG_ADDR(r11,__bss_stop) |
961 | LOAD_REG_ADDR(r8,__bss_start) |
962 | sub r11,r11,r8 /* bss size */ |
963 | addi r11,r11,7 /* round up to an even double word */ |
964 | srdi. r11,r11,3 /* shift right by 3 */ |
965 | beq 4f |
966 | addi r8,r8,-8 |
967 | li r0,0 |
968 | mtctr r11 /* zero this many doublewords */ |
969 | 3: stdu r0,8(r8) |
970 | bdnz 3b |
971 | 4: |
972 | |
973 | #ifdef CONFIG_PPC_EARLY_DEBUG_OPAL |
974 | /* Setup OPAL entry */ |
975 | LOAD_REG_ADDR(r11, opal) |
976 | std r28,0(r11); |
977 | std r29,8(r11); |
978 | #endif |
979 | |
980 | #ifndef CONFIG_PPC_BOOK3E_64 |
981 | mfmsr r6 |
982 | ori r6,r6,MSR_RI |
983 | mtmsrd r6 /* RI on */ |
984 | #endif |
985 | |
986 | #ifdef CONFIG_RELOCATABLE |
987 | /* Save the physical address we're running at in kernstart_addr */ |
988 | LOAD_REG_ADDR(r4, kernstart_addr) |
	clrldi	r0,r25,2		/* clear the top two bits to get the real address */
990 | std r0,0(r4) |
991 | #endif |
992 | |
993 | /* set up a stack pointer */ |
994 | LOAD_REG_ADDR(r3,init_thread_union) |
995 | LOAD_REG_IMMEDIATE(r1,THREAD_SIZE) |
996 | add r1,r3,r1 |
997 | li r0,0 |
998 | stdu r0,-STACK_FRAME_MIN_SIZE(r1) |
999 | |
1000 | /* |
1001 | * Do very early kernel initializations, including initial hash table |
1002 | * and SLB setup before we turn on relocation. |
1003 | */ |
1004 | |
1005 | #ifdef CONFIG_KASAN |
1006 | bl CFUNC(kasan_early_init) |
1007 | #endif |
1008 | /* Restore parameters passed from prom_init/kexec */ |
1009 | mr r3,r31 |
1010 | LOAD_REG_ADDR(r12, DOTSYM(early_setup)) |
1011 | mtctr r12 |
1012 | bctrl /* also sets r13 and SPRG_PACA */ |
1013 | |
1014 | LOAD_REG_ADDR(r3, start_here_common) |
1015 | ld r4,PACAKMSR(r13) |
1016 | mtspr SPRN_SRR0,r3 |
1017 | mtspr SPRN_SRR1,r4 |
1018 | RFI_TO_KERNEL |
1019 | b . /* prevent speculative execution */ |
1020 | |
1021 | /* This is where all platforms converge execution */ |
1022 | |
1023 | start_here_common: |
1024 | /* relocation is on at this point */ |
1025 | std r1,PACAKSAVE(r13) |
1026 | |
1027 | /* Load the TOC (virtual address) */ |
1028 | LOAD_PACA_TOC() |
1029 | |
1030 | /* Mark interrupts soft and hard disabled (they might be enabled |
1031 | * in the PACA when doing hotplug) |
1032 | */ |
1033 | li r0,IRQS_DISABLED |
1034 | stb r0,PACAIRQSOFTMASK(r13) |
1035 | li r0,PACA_IRQ_HARD_DIS |
1036 | stb r0,PACAIRQHAPPENED(r13) |
1037 | |
1038 | /* Generic kernel entry */ |
1039 | bl CFUNC(start_kernel) |
1040 | |
1041 | /* Not reached */ |
1042 | 0: trap |
1043 | EMIT_BUG_ENTRY 0b, __FILE__, __LINE__, 0 |
1044 | .previous |
1045 | |