/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Kernel execution entry point code.
 *
 * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
 *	Initial PowerPC version.
 * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
 *	Rewritten for PReP
 * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
 *	Low-level exception handlers, MMU support, and rewrite.
 * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
 *	PowerPC 8xx modifications.
 * Copyright (c) 1998-1999 TiVo, Inc.
 *	PowerPC 403GCX modifications.
 * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
 *	PowerPC 403GCX/405GP modifications.
 * Copyright 2000 MontaVista Software Inc.
 *	PPC405 modifications
 *	PowerPC 403GCX/405GP modifications.
 * Author: MontaVista Software, Inc.
 *	frank_rowand@mvista.com or source@mvista.com
 *	debbie_chu@mvista.com
 * Copyright 2002-2005 MontaVista Software, Inc.
 *	PowerPC 44x support, Matt Porter <mporter@kernel.crashing.org>
 */

#include <linux/init.h>
#include <linux/pgtable.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/ptrace.h>
#include <asm/synch.h>
#include <asm/code-patching-asm.h>
#include "head_booke.h"


/* As with the other PowerPC ports, it is expected that when code
 * execution begins here, the following registers contain valid, yet
 * optional, information:
 *
 *   r3 - Board info structure pointer (DRAM, frequency, MAC address, etc.)
 *   r4 - Starting address of the init RAM disk
 *   r5 - Ending address of the init RAM disk
 *   r6 - Start of kernel command line string (e.g. "mem=128")
 *   r7 - End of kernel command line string
 *
 */
	__HEAD
_GLOBAL(_stext);
_GLOBAL(_start);
	/*
	 * Reserve a word at a fixed location to store the address
	 * of abatron_pteptrs
	 */
	nop
	mr	r31,r3		/* save device tree ptr */
	li	r24,0		/* CPU number */

#ifdef CONFIG_RELOCATABLE
/*
 * Relocate ourselves to the current runtime address.
 * This is called only by the boot CPU.
 * "relocate" is called with our current runtime virtual
 * address.
 * r21 will be loaded with the physical runtime address of _stext.
 */
	bcl	20,31,$+4		/* Get our runtime address */
0:	mflr	r21			/* Make it accessible */
	addis	r21,r21,(_stext - 0b)@ha
	addi	r21,r21,(_stext - 0b)@l	/* Get our current runtime base */
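
	/*
	 * Note: "bcl 20,31,$+4" is the branch-always-and-link idiom
	 * for PC-relative addressing; unlike a plain "bl" it does not
	 * unbalance the processor's link stack predictor. The
	 * addis/addi pair then rebases the address from 0: to _stext.
	 */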

/*
 * We have the runtime (virtual) address of our base.
 * We calculate our offset from a 256M page.
 * We could map the 256M page we belong to at PAGE_OFFSET and
 * get going from there.
 */
	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	rlwinm	r6,r21,0,4,31	/* r6 = PHYS_START % 256M */
	rlwinm	r5,r4,0,4,31	/* r5 = KERNELBASE % 256M */
	subf	r3,r5,r6	/* r3 = r6 - r5 */
	add	r3,r4,r3	/* Required virtual address */
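
	/*
	 * For example (illustrative values only): running with
	 * r21 = 0x00400000 and KERNELBASE = 0xc0000000 gives
	 * r6 = 0x00400000, r5 = 0 and a required virtual address
	 * of 0xc0400000 in r3.
	 */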

	bl	relocate
#endif

	bl	init_cpu_state

/*
 * This is where the main kernel code starts.
 */

	/* ptr to current */
	lis	r2,init_task@h
	ori	r2,r2,init_task@l

	/* ptr to current thread */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG_THREAD,r4

	/* stack */
	lis	r1,init_thread_union@h
	ori	r1,r1,init_thread_union@l
	li	r0,0
	stwu	r0,THREAD_SIZE-STACK_FRAME_MIN_SIZE(r1)
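
	/*
	 * The stwu above doubles as frame creation: r1 now points
	 * just below the top of init_thread_union, and the stored
	 * zero acts as a NULL back-chain word terminating stack
	 * walks.
	 */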

	bl	early_init

#ifdef CONFIG_RELOCATABLE
	/*
	 * Relocatable kernel support based on processing of dynamic
	 * relocation entries.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 * r21 will contain the current offset of _stext
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	/*
	 * Compute the kernstart_addr.
	 * kernstart_addr => (r6,r8)
	 * kernstart_addr & ~0xfffffff => (r6,r7)
	 */
	rlwinm	r6,r25,0,28,31	/* ERPN. Bits 32-35 of Address */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	rlwinm	r8,r21,0,4,31	/* r8 = (_stext & 0xfffffff) */
	or	r8,r7,r8	/* Compute the lower 32bit of kernstart_addr */

	/* Store kernstart_addr */
	stw	r6,0(r3)	/* higher 32bit */
	stw	r8,4(r3)	/* lower 32bit */
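
	/*
	 * Worked example (illustrative values only): for memory
	 * starting at 36-bit physical 0x1_10000000, r25 would hold
	 * ERPN = 0x1 in its low nibble and the 256M-aligned RPN in
	 * its top nibble, i.e. r25 = 0x10000001, giving
	 * r6 = 0x00000001 and r8 = 0x10000000 | (_stext & 0xfffffff).
	 */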

	/*
	 * Compute the virt_phys_offset :
	 * virt_phys_offset = stext.run - kernstart_addr
	 *
	 * stext.run = (KERNELBASE & ~0xfffffff) + (kernstart_addr & 0xfffffff)
	 * When we relocate, we have :
	 *
	 *	(kernstart_addr & 0xfffffff) = (stext.run & 0xfffffff)
	 *
	 * hence:
	 *	virt_phys_offset = (KERNELBASE & ~0xfffffff) - (kernstart_addr & ~0xfffffff)
	 *
	 */

	/* KERNELBASE&~0xfffffff => (r4,r5) */
	li	r4, 0		/* higher 32 bits */
	lis	r5,KERNELBASE@h
	rlwinm	r5,r5,0,0,3	/* Align to 256M, lower 32 bits */

	/*
	 * 64bit subtraction.
	 */
	subfc	r5,r7,r5
	subfe	r4,r6,r4
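
	/*
	 * subfc produces the low word and records the borrow in the
	 * carry bit; subfe consumes it for the high word, yielding a
	 * full 64-bit (r4:r5) = (r4:r5) - (r6:r7).
	 */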

	/* Store virt_phys_offset */
	lis	r3,virt_phys_offset@ha
	la	r3,virt_phys_offset@l(r3)

	stw	r4,0(r3)
	stw	r5,4(r3)

#elif defined(CONFIG_DYNAMIC_MEMSTART)
	/*
	 * Mapping based, page aligned dynamic kernel loading.
	 *
	 * r25 will contain RPN/ERPN for the start address of memory
	 *
	 * Add the difference between KERNELBASE and PAGE_OFFSET to the
	 * start of physical memory to get kernstart_addr.
	 */
	lis	r3,kernstart_addr@ha
	la	r3,kernstart_addr@l(r3)

	lis	r4,KERNELBASE@h
	ori	r4,r4,KERNELBASE@l
	lis	r5,PAGE_OFFSET@h
	ori	r5,r5,PAGE_OFFSET@l
	subf	r4,r5,r4

	rlwinm	r6,r25,0,28,31	/* ERPN */
	rlwinm	r7,r25,0,0,3	/* RPN - assuming 256 MB page size */
	add	r7,r7,r4

	stw	r6,0(r3)
	stw	r7,4(r3)
#endif

	/*
	 * Decide what sort of machine this is and initialize the MMU.
	 */
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	li	r3,0
	mr	r4,r31
	bl	machine_init
	bl	MMU_init

	/* Setup PTE pointers for the Abatron bdiGDB */
	lis	r6, swapper_pg_dir@h
	ori	r6, r6, swapper_pg_dir@l
	lis	r5, abatron_pteptrs@h
	ori	r5, r5, abatron_pteptrs@l
	lis	r4, KERNELBASE@h
	ori	r4, r4, KERNELBASE@l
	stw	r5, 0(r4)	/* Save abatron_pteptrs at a fixed location */
	stw	r6, 0(r5)
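
	/*
	 * KERNELBASE+0 is the word reserved by the nop at _stext near
	 * the top of this file, so the Abatron probe can always find
	 * the abatron_pteptrs pointer at a fixed, known address.
	 */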

	/* Clear the Machine Check Syndrome Register */
	li	r0,0
	mtspr	SPRN_MCSR,r0

	/* Let's move on */
	lis	r4,start_kernel@h
	ori	r4,r4,start_kernel@l
	lis	r3,MSR_KERNEL@h
	ori	r3,r3,MSR_KERNEL@l
	mtspr	SPRN_SRR0,r4
	mtspr	SPRN_SRR1,r3
	rfi			/* change context and jump to start_kernel */

/*
 * Interrupt vector entry code
 *
 * The Book E MMUs are always on so we don't need to handle
 * interrupts in real mode as with previous PPC processors. In
 * this case we handle interrupts in the kernel virtual address
 * space.
 *
 * Interrupt vectors are dynamically placed relative to the
 * interrupt prefix as determined by the address of interrupt_base.
 * The interrupt vector offsets are programmed using the labels
 * for each interrupt vector entry.
 *
 * Interrupt vectors must be aligned on a 16 byte boundary.
 * We align on a 32 byte cache line boundary for good measure.
 */

interrupt_base:
	/* Critical Input Interrupt */
	CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)

	/* Machine Check Interrupt */
	CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
			   machine_check_exception)
	MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)

	/* Data Storage Interrupt */
	DATA_STORAGE_EXCEPTION

	/* Instruction Storage Interrupt */
	INSTRUCTION_STORAGE_EXCEPTION

	/* External Input Interrupt */
	EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, do_IRQ)

	/* Alignment Interrupt */
	ALIGNMENT_EXCEPTION

	/* Program Interrupt */
	PROGRAM_EXCEPTION

	/* Floating Point Unavailable Interrupt */
#ifdef CONFIG_PPC_FPU
	FP_UNAVAILABLE_EXCEPTION
#else
	EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
		  FloatingPointUnavailable, unknown_exception)
#endif
	/* System Call Interrupt */
	START_EXCEPTION(SystemCall)
	SYSCALL_ENTRY	0xc00 BOOKE_INTERRUPT_SYSCALL

	/* Auxiliary Processor Unavailable Interrupt */
	EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
		  AuxillaryProcessorUnavailable, unknown_exception)

	/* Decrementer Interrupt */
	DECREMENTER_EXCEPTION

	/* Fixed Interval Timer Interrupt */
	/* TODO: Add FIT support */
	EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, unknown_exception)

	/* Watchdog Timer Interrupt */
	/* TODO: Add watchdog support */
#ifdef CONFIG_BOOKE_WDT
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
#else
	CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
#endif

	/* Data TLB Error Interrupt */
	START_EXCEPTION(DataTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	cr7, r10, r11
	blt+	cr7, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r13,0
	beq	2f			/* KUAP Fault */
#endif

4:
	mtspr	SPRN_MMUCR,r12
	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to the _PAGE_WRITE position, since trying
	 * to write to an RO page is pretty common, we don't do it
	 * with _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * than add an instruction here. We should measure whether
	 * this is worth it at all, as we could avoid loading
	 * SPRN_ESR completely in the first place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
	rlwimi	r13,r12,10,30,30
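
	/*
	 * The rlwimi rotates ESR left by 10 and inserts just bit 30:
	 * ESR[ST] (0x00800000, set when the faulting access was a
	 * store) thereby lands in the _PAGE_WRITE position (0x2), so
	 * stores additionally require write permission.
	 */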

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */
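
	/*
	 * Note the scheduling above: the tlb_44x_index load sits
	 * between the andc. and the bne, presumably to hide the load
	 * latency behind the permission test.
	 */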

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	patch_site 0f, patch__tlb_44x_hwater_D
	/* Compare with watermark (instruction gets patched) */
0:	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_DEAR

	/* Jump to common tlb load */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError44x)
	mtspr	SPRN_SPRG_WSCRATCH0, r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1, r11
	mtspr	SPRN_SPRG_WSCRATCH2, r12
	mtspr	SPRN_SPRG_WSCRATCH3, r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4, r11
	mfspr	r10, SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11, PAGE_OFFSET@h
	cmplw	cr7, r10, r11
	blt+	cr7, 3f
	lis	r11, swapper_pg_dir@h
	ori	r11, r11, swapper_pg_dir@l

	mfspr	r12,SPRN_MMUCR
	rlwinm	r12,r12,0,0,23		/* Clear TID */

	b	4f

	/* Get the PGD for the current thread */
3:
	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)

	/* Load PID into MMUCR TID */
	mfspr	r12,SPRN_MMUCR
	mfspr	r13,SPRN_PID		/* Get PID */
	rlwimi	r12,r13,0,24,31		/* Set TID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r13,0
	beq	2f			/* KUAP Fault */
#endif

4:
	mtspr	SPRN_MMUCR,r12

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Compute pgdir/pmd offset */
	rlwinm	r12, r10, PPC44x_PGD_OFF_SHIFT, PPC44x_PGD_OFF_MASK_BIT, 29
	lwzx	r11, r12, r11		/* Get pgd/pmd entry */
	rlwinm.	r12, r11, 0, 0, 20	/* Extract pt base address */
	beq	2f			/* Bail if no table */

	/* Compute pte address */
	rlwimi	r12, r10, PPC44x_PTE_ADD_SHIFT, PPC44x_PTE_ADD_MASK_BIT, 28
	lwz	r11, 0(r12)		/* Get high word of pte entry */
	lwz	r12, 4(r12)		/* Get low word of pte entry */

	lis	r10,tlb_44x_index@ha

	andc.	r13,r13,r12		/* Check permission */

	/* Load the next available TLB index */
	lwz	r13,tlb_44x_index@l(r10)

	bne	2f			/* Bail if permission mismatch */

	/* Increment, rollover, and store TLB index */
	addi	r13,r13,1

	patch_site 0f, patch__tlb_44x_hwater_I
	/* Compare with watermark (instruction gets patched) */
0:	cmpwi	0,r13,1			/* reserve entries */
	ble	5f
	li	r13,0
5:
	/* Store the next available TLB index */
	stw	r13,tlb_44x_index@l(r10)

	/* Re-load the faulting address */
	mfspr	r10,SPRN_SRR0

	/* Jump to common TLB load point */
	b	finish_tlb_load_44x

2:
	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - EA of fault
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - TLB index
 *	cr7 - Result of comparison with PAGE_OFFSET
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_44x:
	/* Combine RPN & ERPN and write WS 0 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,PPC44x_TLB_XLAT

	/*
	 * Create WS1. This is the faulting address (EPN),
	 * page size, and valid flag.
	 */
	li	r11,PPC44x_TLB_VALID | PPC44x_TLBE_SIZE
	/* Insert valid and page size */
	rlwimi	r10,r11,0,PPC44x_PTE_ADD_MASK_BIT,31
	tlbwe	r10,r13,PPC44x_TLB_PAGEID	/* Write PAGEID */

	/* And WS 2 */
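	/*
	 * How word 2 is assembled below: the 0xf84 mask keeps the W,
	 * I, M, G, E and SX bits coming from the PTE; the first
	 * rlwimi (a rotate right by 3) drops _PAGE_DIRTY and
	 * _PAGE_READ into the SW and SR slots; for user pages the S
	 * permission bits are then replicated into UX/UW/UR and SX is
	 * cleared, presumably so user mappings never carry supervisor
	 * execute permission.
	 */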
	li	r10,0xf84		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,31	/* DIRTY,READ -> SW,SR position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	bge	cr7,1f			/* User page ? no, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
	rlwinm	r11,r11,0,~PPC44x_TLB_SX	/* Clear SX if User page */
1:	tlbwe	r11,r13,PPC44x_TLB_ATTRIB	/* Write ATTRIB */

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi				/* Force context change */

/* TLB error interrupts for 476
 */
#ifdef CONFIG_PPC_47x
	START_EXCEPTION(DataTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_DEAR		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr7,r10,r11
	blt+	cr7,3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG3
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r12,0
	beq	2f			/* KUAP Fault */
#endif
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Mask of required permission bits. Note that while we
	 * do copy ESR:ST to the _PAGE_WRITE position, since trying
	 * to write to an RO page is pretty common, we don't do it
	 * with _PAGE_DIRTY. We could do it, but it's a fairly rare
	 * event so I'd rather take the overhead when it happens
	 * than add an instruction here. We should measure whether
	 * this is worth it at all, as we could avoid loading
	 * SPRN_ESR completely in the first place...
	 *
	 * TODO: Is it worth doing that mfspr & rlwimi in the first
	 * place or can we save a couple of instructions here ?
	 */
	mfspr	r12,SPRN_ESR
	li	r13,_PAGE_PRESENT|_PAGE_ACCESSED|_PAGE_READ
	rlwimi	r13,r12,10,30,30

	/* Load the PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has latched
	 * the V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */
	lwz	r11,0(r12)		/* Get high word of pte entry */

	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common tlb load */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11,SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13,SPRN_SPRG_RSCRATCH3
	mfspr	r12,SPRN_SPRG_RSCRATCH2
	mfspr	r11,SPRN_SPRG_RSCRATCH1
	mfspr	r10,SPRN_SPRG_RSCRATCH0
	b	DataStorage

	/* Instruction TLB Error Interrupt */
	/*
	 * Nearly the same as above, except we get our
	 * information from different registers and bailout
	 * to a different point.
	 */
	START_EXCEPTION(InstructionTLBError47x)
	mtspr	SPRN_SPRG_WSCRATCH0,r10	/* Save some working registers */
	mtspr	SPRN_SPRG_WSCRATCH1,r11
	mtspr	SPRN_SPRG_WSCRATCH2,r12
	mtspr	SPRN_SPRG_WSCRATCH3,r13
	mfcr	r11
	mtspr	SPRN_SPRG_WSCRATCH4,r11
	mfspr	r10,SPRN_SRR0		/* Get faulting address */

	/* If we are faulting a kernel address, we have to use the
	 * kernel page tables.
	 */
	lis	r11,PAGE_OFFSET@h
	cmplw	cr7,r10,r11
	blt+	cr7,3f
	lis	r11,swapper_pg_dir@h
	ori	r11,r11, swapper_pg_dir@l
	li	r12,0			/* MMUCR = 0 */
	b	4f

	/* Get the PGD for the current thread and setup MMUCR */
3:	mfspr	r11,SPRN_SPRG_THREAD
	lwz	r11,PGDIR(r11)
	mfspr	r12,SPRN_PID		/* Get PID */
#ifdef CONFIG_PPC_KUAP
	cmpwi	r12,0
	beq	2f			/* KUAP Fault */
#endif
4:	mtspr	SPRN_MMUCR,r12		/* Set MMUCR */

	/* Make up the required permissions */
	li	r13,_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_EXEC

	/* Load PTE */
	/* Compute pgdir/pmd offset */
	rlwinm	r12,r10,PPC44x_PGD_OFF_SHIFT,PPC44x_PGD_OFF_MASK_BIT,29
	lwzx	r11,r12,r11		/* Get pgd/pmd entry */

	/* Word 0 is EPN,V,TS,DSIZ */
	li	r12,PPC47x_TLB0_VALID | PPC47x_TLBE_SIZE
	rlwimi	r10,r12,0,32-PAGE_SHIFT,31	/* Insert valid and page size*/
	li	r12,0
	tlbwe	r10,r12,0

	/* XXX can we do better ? Need to make sure tlbwe has latched
	 * the V bit in MMUCR0 before the PTE is loaded further down */
#ifdef CONFIG_SMP
	isync
#endif

	rlwinm.	r12,r11,0,0,20		/* Extract pt base address */
	/* Compute pte address */
	rlwimi	r12,r10,PPC44x_PTE_ADD_SHIFT,PPC44x_PTE_ADD_MASK_BIT,28
	beq	2f			/* Bail if no table */

	lwz	r11,0(r12)		/* Get high word of pte entry */
	/* XXX can we do better ? maybe insert a known 0 bit from r11 into the
	 * bottom of r12 to create a data dependency... We can also use r10
	 * as destination nowadays
	 */
#ifdef CONFIG_SMP
	lwsync
#endif
	lwz	r12,4(r12)		/* Get low word of pte entry */

	andc.	r13,r13,r12		/* Check permission */

	/* Jump to common TLB load point */
	beq	finish_tlb_load_47x

2:	/* The bailout. Restore registers to pre-exception conditions
	 * and call the heavyweights to help us out.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	b	InstructionStorage

/*
 * Both the instruction and data TLB miss get to this
 * point to load the TLB.
 *	r10 - free to use
 *	r11 - PTE high word value
 *	r12 - PTE low word value
 *	r13 - free to use
 *	cr7 - Result of comparison with PAGE_OFFSET
 *	MMUCR - loaded with proper value when we get here
 *	Upon exit, we reload everything and RFI.
 */
finish_tlb_load_47x:
	/* Combine RPN & ERPN and write WS 1 */
	rlwimi	r11,r12,0,0,31-PAGE_SHIFT
	tlbwe	r11,r13,1

	/* And make up word 2 */
	li	r10,0xf84		/* Mask to apply from PTE */
	rlwimi	r10,r12,29,30,31	/* DIRTY,READ -> SW,SR position */
	and	r11,r12,r10		/* Mask PTE bits to keep */
	bge	cr7,1f			/* User page ? no, leave U bits empty */
	rlwimi	r11,r11,3,26,28		/* yes, copy S bits to U */
	rlwinm	r11,r11,0,~PPC47x_TLB2_SX	/* Clear SX if User page */
1:	tlbwe	r11,r13,2

	/* Done...restore registers and get out of here.
	 */
	mfspr	r11, SPRN_SPRG_RSCRATCH4
	mtcr	r11
	mfspr	r13, SPRN_SPRG_RSCRATCH3
	mfspr	r12, SPRN_SPRG_RSCRATCH2
	mfspr	r11, SPRN_SPRG_RSCRATCH1
	mfspr	r10, SPRN_SPRG_RSCRATCH0
	rfi

#endif /* CONFIG_PPC_47x */

	/* Debug Interrupt */
	/*
	 * This statement needs to exist at the end of the IVPR
	 * definition just in case you end up taking a debug
	 * exception within another exception.
	 */
	DEBUG_CRIT_EXCEPTION

interrupt_end:

/*
 * Global functions
 */

/*
 * Adjust the machine check IVOR on 440A cores
 */
_GLOBAL(__fixup_440A_mcheck)
	li	r3,MachineCheckA@l
	mtspr	SPRN_IVOR1,r3
	sync
	blr

/*
 * Init CPU state. This is called at boot time or for secondary CPUs
 * to setup initial TLB entries, setup IVORs, etc...
 *
 */
_GLOBAL(init_cpu_state)
	mflr	r22
#ifdef CONFIG_PPC_47x
	/* We use the PVR to differentiate 44x cores from 476 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmplwi	cr0,r3,PVR_476FPE@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476@h
	beq	head_start_47x
	cmplwi	cr0,r3,PVR_476_ISS@h
	beq	head_start_47x
#endif /* CONFIG_PPC_47x */

	/*
	 * In case the firmware didn't do it, we apply some workarounds
	 * that are good for all 440 core variants here
	 */
	mfspr	r3,SPRN_CCR0
	rlwinm	r3,r3,0,0,27	/* disable icache prefetch */
	isync
	mtspr	SPRN_CCR0,r3
	isync
	sync

/*
 * Set up the initial MMU state for 44x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 *
 * We first invalidate all TLB entries but the one
 * we are running from.  We then load the KERNELBASE
 * mappings so we can begin to use kernel addresses
 * natively and so the interrupt vector locations are
 * permanently pinned (necessary since Book E
 * implementations always have translation enabled).
 *
 * TODO: Use the known TLB entry we are running from to
 *	 determine which physical region we are located
 *	 in.  This can be used to determine where in RAM
 *	 (on a shared CPU system) or PCI memory space
 *	 (on a DRAMless system) we are located.
 *	 For now, we assume a perfect world which means
 *	 we are located at the base of DRAM (physical 0).
 */

/*
 * Search TLB for entry that we are currently using.
 * Invalidate all entries but the one we are using.
 */
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	wmmucr			/* If not, leave STS=0 */
	oris	r3,r3,PPC44x_MMUCR_STS@h	/* Set STS=1 */
wmmucr:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	bcl	20,31,$+4		/* Find our address */
invstr:	mflr	r5			/* Make it accessible */
	tlbsx	r23,0,r5		/* Find entry we are in */
	li	r4,0			/* Start at TLB entry 0 */
	li	r3,0			/* Set PAGEID inval value */
1:	cmpw	r23,r4			/* Is this our entry? */
	beq	skpinv			/* If so, skip the inval */
	tlbwe	r3,r4,PPC44x_TLB_PAGEID	/* If not, inval the entry */
skpinv:	addi	r4,r4,1			/* Increment */
	cmpwi	r4,64			/* Are we done? */
	bne	1b			/* If not, repeat */
	isync				/* If so, context change */

/*
 * Configure and load pinned entry into TLB slot 63.
 */
#ifdef CONFIG_NONSTATIC_KERNEL
	/*
	 * In case of a NONSTATIC_KERNEL we reuse the TLB XLAT
	 * entries of the initial mapping set by the boot loader.
	 * The XLAT entry is stored in r25
	 */

	/* Read the XLAT entry for our current mapping */
	tlbre	r25,r23,PPC44x_TLB_XLAT

	lis	r3,KERNELBASE@h
	ori	r3,r3,KERNELBASE@l

	/* Use our current RPN entry */
	mr	r4,r25
#else

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Kernel is at the base of RAM */
	li	r4, 0			/* Load the kernel physical address */
#endif

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

	/* pageid fields */
	clrrwi	r3,r3,10		/* Keep only the effective page number */
	ori	r3,r3,PPC44x_TLB_VALID | PPC44x_TLB_256M

	/* xlat fields */
	clrrwi	r4,r4,10		/* Keep only the real page number */
	/* ERPN is 0 for first 4GB page */

	/* attrib fields */
	/* Added guarded bit to protect against speculative loads/stores */
	li	r5,0
	ori	r5,r5,(PPC44x_TLB_SW | PPC44x_TLB_SR | PPC44x_TLB_SX | PPC44x_TLB_G)

	li	r0,63			/* TLB slot 63 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID	/* Load the pageid fields */
	tlbwe	r4,r0,PPC44x_TLB_XLAT	/* Load the translation fields */
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB	/* Load the attrib/access fields */

	/*
	 * Force context change: rfi loads SRR0 into the PC and SRR1
	 * into the MSR, so we resume at 3: below through the new
	 * pinned TLB entry.
	 */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* If necessary, invalidate original entry we used */
3:	cmpwi	r23,63
	beq	4f
	li	r6,0
	tlbwe	r6,r23,PPC44x_TLB_PAGEID
	isync

4:
#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* pageid fields */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC44x_TLB_VALID|PPC44x_TLB_TS|PPC44x_TLB_64K

	/* xlat fields */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* attrib fields */
	li	r5,(PPC44x_TLB_SW|PPC44x_TLB_SR|PPC44x_TLB_I|PPC44x_TLB_G)
	li	r0,62			/* TLB slot 62 */

	tlbwe	r3,r0,PPC44x_TLB_PAGEID
	tlbwe	r4,r0,PPC44x_TLB_XLAT
	tlbwe	r5,r0,PPC44x_TLB_ATTRIB

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheck);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError44x);
	SET_IVOR(14, InstructionTLBError44x);
	SET_IVOR(15, DebugCrit);

	b	head_start_common


#ifdef CONFIG_PPC_47x

#ifdef CONFIG_SMP

/* Entry point for secondary 47x processors */
_GLOBAL(start_secondary_47x)
	mr	r24,r3		/* CPU number */

	bl	init_cpu_state

	/* Now we need to bolt the rest of kernel memory which
	 * is done in C code. We must be careful because our task
	 * struct or our stack can (and probably will) be out
	 * of reach of the initial 256M TLB entry, so we use a
	 * small temporary stack in .bss for that. This works
	 * because only one CPU at a time can be in this code.
	 */
	lis	r1,temp_boot_stack@h
	ori	r1,r1,temp_boot_stack@l
	addi	r1,r1,1024-STACK_FRAME_MIN_SIZE
	li	r0,0
	stw	r0,0(r1)
	bl	mmu_init_secondary

	/* Now we can get our task struct and real stack pointer */

	/* Get current's stack and current */
	lis	r2,secondary_current@ha
	lwz	r2,secondary_current@l(r2)
	lwz	r1,TASK_STACK(r2)

	/* Current stack pointer */
	addi	r1,r1,THREAD_SIZE-STACK_FRAME_MIN_SIZE
	li	r0,0
	stw	r0,0(r1)

	/* Kernel stack for exception entry in SPRG3 */
	addi	r4,r2,THREAD	/* init task's THREAD */
	mtspr	SPRN_SPRG3,r4

	b	start_secondary

#endif /* CONFIG_SMP */

/*
 * Set up the initial MMU state for 47x
 *
 * We are still executing code at the virtual address
 * mappings set by the firmware for the base of RAM.
 */

head_start_47x:
	/* Load our current PID->MMUCR TID and MSR IS->MMUCR STS */
	mfspr	r3,SPRN_PID		/* Get PID */
	mfmsr	r4			/* Get MSR */
	andi.	r4,r4,MSR_IS@l		/* TS=1? */
	beq	1f			/* If not, leave STS=0 */
	oris	r3,r3,PPC47x_MMUCR_STS@h	/* Set STS=1 */
1:	mtspr	SPRN_MMUCR,r3		/* Put MMUCR */
	sync

	/* Find the entry we are running from */
	bcl	20,31,$+4
1:	mflr	r23
	tlbsx	r23,0,r23
	tlbre	r24,r23,0
	tlbre	r25,r23,1
	tlbre	r26,r23,2

/*
 * Cleanup time
 */

	/* Initialize MMUCR */
	li	r5,0
	mtspr	SPRN_MMUCR,r5
	sync

clear_all_utlb_entries:

	#; Set initial values.

	addis	r3,0,0x8000
	addi	r4,0,0
	addi	r5,0,0
	b	clear_utlb_entry

	#; Align the loop to speed things up.

	.align	6

clear_utlb_entry:

	tlbwe	r4,r3,0
	tlbwe	r5,r3,1
	tlbwe	r5,r3,2
	addis	r3,r3,0x2000
	cmpwi	r3,0
	bne	clear_utlb_entry
	addis	r3,0,0x8000
	addis	r4,r4,0x100
	cmpwi	r4,0
	bne	clear_utlb_entry
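
	#; Loop structure, as far as can be inferred: r3 carries the
	#; "direct way" bit plus the way select, so the inner addis of
	#; 0x2000 steps through the 4 ways before wrapping to zero,
	#; while r4 steps the EPN through 256 congruence classes.
	#; 4 ways x 256 classes covers the 476's 1024-entry UTLB.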

	#; Restore original entry.

	oris	r23,r23,0x8000	/* specify the way */
	tlbwe	r24,r23,0
	tlbwe	r25,r23,1
	tlbwe	r26,r23,2

/*
 * Configure and load pinned entry into TLB for the kernel core
 */

	lis	r3,PAGE_OFFSET@h
	ori	r3,r3,PAGE_OFFSET@l

	/* Load the kernel PID = 0 */
	li	r0,0
	mtspr	SPRN_PID,r0
	sync

	/* Word 0 */
	clrrwi	r3,r3,12		/* Keep only the effective page number */
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_256M

	/* Word 1 - use r25.  RPN is the same as the original entry */

	/* Word 2 */
	li	r5,0
	ori	r5,r5,PPC47x_TLB2_S_RWX
#ifdef CONFIG_SMP
	ori	r5,r5,PPC47x_TLB2_M
#endif

	/* We write to way 0 and bolted 0 */
	lis	r0,0x8800
	tlbwe	r3,r0,0
	tlbwe	r25,r0,1
	tlbwe	r5,r0,2

/*
 * Configure SSPCR, ISPCR and USPCR for now to search everything, we can fix
 * them up later
 */
	LOAD_REG_IMMEDIATE(r3, 0x9abcdef0)
	mtspr	SPRN_SSPCR,r3
	mtspr	SPRN_USPCR,r3
	LOAD_REG_IMMEDIATE(r3, 0x12345670)
	mtspr	SPRN_ISPCR,r3

	/* Force context change */
	mfmsr	r0
	mtspr	SPRN_SRR1, r0
	lis	r0,3f@h
	ori	r0,r0,3f@l
	mtspr	SPRN_SRR0,r0
	sync
	rfi

	/* Invalidate original entry we used */
3:
	rlwinm	r24,r24,0,21,19	/* clear the "valid" bit */
	tlbwe	r24,r23,0
	addi	r24,0,0
	tlbwe	r24,r23,1
	tlbwe	r24,r23,2
	isync			/* Clear out the shadow TLB entries */

#ifdef CONFIG_PPC_EARLY_DEBUG_44x
	/* Add UART mapping for early debug. */

	/* Word 0 */
	lis	r3,PPC44x_EARLY_DEBUG_VIRTADDR@h
	ori	r3,r3,PPC47x_TLB0_VALID | PPC47x_TLB0_TS | PPC47x_TLB0_1M

	/* Word 1 */
	lis	r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW@h
	ori	r4,r4,CONFIG_PPC_EARLY_DEBUG_44x_PHYSHIGH

	/* Word 2 */
	li	r5,(PPC47x_TLB2_S_RW | PPC47x_TLB2_IMG)

	/* Bolted in way 0, bolt slot 5; we -hope- we don't hit the same
	 * congruence class as the kernel. We need to make sure of it at
	 * some point.
	 */
	lis	r0,0x8d00
	tlbwe	r3,r0,0
	tlbwe	r4,r0,1
	tlbwe	r5,r0,2

	/* Force context change */
	isync
#endif /* CONFIG_PPC_EARLY_DEBUG_44x */

	/* Establish the interrupt vector offsets */
	SET_IVOR(0, CriticalInput);
	SET_IVOR(1, MachineCheckA);
	SET_IVOR(2, DataStorage);
	SET_IVOR(3, InstructionStorage);
	SET_IVOR(4, ExternalInput);
	SET_IVOR(5, Alignment);
	SET_IVOR(6, Program);
	SET_IVOR(7, FloatingPointUnavailable);
	SET_IVOR(8, SystemCall);
	SET_IVOR(9, AuxillaryProcessorUnavailable);
	SET_IVOR(10, Decrementer);
	SET_IVOR(11, FixedIntervalTimer);
	SET_IVOR(12, WatchdogTimer);
	SET_IVOR(13, DataTLBError47x);
	SET_IVOR(14, InstructionTLBError47x);
	SET_IVOR(15, DebugCrit);

	/* We configure icbi to invalidate 128 bytes at a time since the
	 * current 32-bit kernel code isn't too happy with icache != dcache
	 * block size. We also disable the BTAC as this can cause errors
	 * in some circumstances (see IBM Erratum 47).
	 */
	mfspr	r3,SPRN_CCR0
	oris	r3,r3,0x0020
	ori	r3,r3,0x0040
	mtspr	SPRN_CCR0,r3
	isync

#endif /* CONFIG_PPC_47x */

/*
 * Here we are back to code that is common between 44x and 47x
 *
 * We proceed to further kernel initialization and return to the
 * main kernel entry
 */
head_start_common:
	/* Establish the interrupt vector base */
	lis	r4,interrupt_base@h	/* IVPR only uses the high 16-bits */
	mtspr	SPRN_IVPR,r4

	/*
	 * If the kernel was loaded at a non-zero 256 MB page, we need to
	 * mask off the most significant 4 bits to get the relative address
	 * from the start of physical memory
	 */
	rlwinm	r22,r22,0,4,31
	addis	r22,r22,PAGE_OFFSET@h
	mtlr	r22
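
	/*
	 * e.g. (illustrative, assuming PAGE_OFFSET = 0xc0000000): an
	 * LR saved as 0x10000f00 while running from the firmware
	 * mapping is masked to 0x00000f00 and rebased to 0xc0000f00,
	 * so the blr below lands in the new pinned kernel mapping.
	 */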
	isync
	blr

#ifdef CONFIG_SMP
	.data
	.align	12
temp_boot_stack:
	.space	1024
#endif /* CONFIG_SMP */