1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * linux/arch/arm/kernel/head-common.S |
4 | * |
5 | * Copyright (C) 1994-2002 Russell King |
6 | * Copyright (c) 2003 ARM Limited |
7 | * All Rights Reserved |
8 | */ |
9 | #include <asm/assembler.h> |
10 | |
11 | #define ATAG_CORE 0x54410001 |
12 | #define ATAG_CORE_SIZE ((2*4 + 3*4) >> 2) |
13 | #define ATAG_CORE_SIZE_EMPTY ((2*4) >> 2) |
14 | |
15 | #ifdef CONFIG_CPU_BIG_ENDIAN |
16 | #define OF_DT_MAGIC 0xd00dfeed |
17 | #else |
18 | #define OF_DT_MAGIC 0xedfe0dd0 /* 0xd00dfeed in big-endian */ |
19 | #endif |
20 | |
21 | /* |
22 | * Exception handling. Something went wrong and we can't proceed. We |
23 | * ought to tell the user, but since we don't have any guarantee that |
24 | * we're even running on the right architecture, we do virtually nothing. |
25 | * |
26 | * If CONFIG_DEBUG_LL is set we try to print out something about the error |
27 | * and hope for the best (useful if bootloader fails to pass a proper |
28 | * machine ID for example). |
29 | */ |
30 | __HEAD |
31 | |
32 | /* Determine validity of the r2 atags pointer. The heuristic requires |
33 | * that the pointer be aligned, in the first 16k of physical RAM and |
34 | * that the ATAG_CORE marker is first and present. If CONFIG_OF_FLATTREE |
35 | * is selected, then it will also accept a dtb pointer. Future revisions |
36 | * of this function may be more lenient with the physical address and |
37 | * may also be able to move the ATAGS block if necessary. |
38 | * |
39 | * Returns: |
40 | * r2 either valid atags pointer, valid dtb pointer, or zero |
41 | * r5, r6 corrupted |
42 | */ |
__vet_atags:
	tst	r2, #0x3			@ aligned? (word alignment required)
	bne	1f

	ldr	r5, [r2, #0]			@ first word: DTB magic or first ATAG size
#ifdef CONFIG_OF_FLATTREE
	ldr	r6, =OF_DT_MAGIC		@ is it a DTB?
	cmp	r5, r6
	beq	2f
#endif
	cmp	r5, #ATAG_CORE_SIZE		@ is first tag ATAG_CORE?
	cmpne	r5, #ATAG_CORE_SIZE_EMPTY	@ ...or the empty (header-only) variant
	bne	1f
	ldr	r5, [r2, #4]			@ second word: the tag ID itself
	ldr	r6, =ATAG_CORE			@ must be ATAG_CORE to be a valid list
	cmp	r5, r6
	bne	1f

2:	ret	lr				@ atag/dtb pointer is ok

1:	mov	r2, #0				@ invalid: report "no tags" to the caller
	ret	lr
ENDPROC(__vet_atags)
66 | |
67 | /* |
68 | * The following fragment of code is executed with the MMU on in MMU mode, |
69 | * and uses absolute addresses; this is not position independent. |
70 | * |
71 | * r0 = cp#15 control register (exc_ret for M-class) |
72 | * r1 = machine ID |
73 | * r2 = atags/dtb pointer |
74 | * r9 = processor ID |
75 | */ |
	__INIT
__mmap_switched:

	@ Stash boot-protocol values in callee-chosen registers that survive
	@ the __memcpy/__memset/__inflate_kernel_data calls below.
	mov	r7, r1				@ r7 = machine ID
	mov	r8, r2				@ r8 = atags/dtb pointer
	mov	r10, r0				@ r10 = cp#15 control reg (or exc_ret)

	adr	r4, __mmap_switched_data	@ r4 walks the pointer table below
	mov	fp, #0				@ terminate the frame-pointer chain

#if defined(CONFIG_XIP_DEFLATED_DATA)
	@ XIP with compressed .data: table supplies only the temporary stack;
	@ the decompressor locates .data itself.
 ARM(	ldr	sp, [r4], #4 )
 THUMB(	ldr	sp, [r4] )
 THUMB(	add	r4, #4 )
	bl	__inflate_kernel_data		@ decompress .data to RAM
	teq	r0, #0				@ failure?
	bne	__error
#elif defined(CONFIG_XIP_KERNEL)
	@ XIP: copy initialized .data from flash (r1) to RAM (r0).
 ARM(	ldmia	r4!, {r0, r1, r2, sp} )
 THUMB(	ldmia	r4!, {r0, r1, r2, r3} )
 THUMB(	mov	sp, r3 )
	sub	r2, r2, r1			@ r2 = size of .data image
	bl	__memcpy			@ copy .data to RAM
#endif

	@ r0 = __bss_start, r1 = __bss_stop, sp = init task stack
 ARM(	ldmia	r4!, {r0, r1, sp} )
 THUMB(	ldmia	r4!, {r0, r1, r3} )
 THUMB(	mov	sp, r3 )
	sub	r2, r1, r0			@ r2 = size of .bss
	mov	r1, #0				@ fill value
	bl	__memset			@ clear .bss

	adr_l	r0, init_task			@ get swapper task_struct
	set_current r0, r1

	@ Publish the stashed boot values to their C-visible variables.
	ldmia	r4, {r0, r1, r2, r3}		@ addresses of the four variables
	str	r9, [r0]			@ Save processor ID
	str	r7, [r1]			@ Save machine type
	str	r8, [r2]			@ Save atags pointer
	cmp	r3, #0				@ slot may be 0 (no CP15, A-class)
	strne	r10, [r3]			@ Save control register values
#ifdef CONFIG_KASAN
	bl	kasan_early_init
#endif
	mov	lr, #0				@ start_kernel never returns; clean backtrace
	b	start_kernel
ENDPROC(__mmap_switched)
123 | |
	/*
	 * Pointer table consumed by __mmap_switched via sequential ldmia,
	 * so the entry order here must exactly match the register lists
	 * used there.  Register assignments are noted per entry.
	 */
	.align	2
	.type	__mmap_switched_data, %object
__mmap_switched_data:
#ifdef CONFIG_XIP_KERNEL
#ifndef CONFIG_XIP_DEFLATED_DATA
	.long	_sdata				@ r0 (.data destination in RAM)
	.long	__data_loc			@ r1 (.data load address in flash)
	.long	_edata_loc			@ r2 (end of .data load image)
#endif
	.long	__bss_stop			@ sp (temporary stack in .bss)
#endif

	.long	__bss_start			@ r0
	.long	__bss_stop			@ r1
	.long	init_thread_union + THREAD_START_SP @ sp

	.long	processor_id			@ r0
	.long	__machine_arch_type		@ r1
	.long	__atags_pointer			@ r2
#ifdef CONFIG_CPU_CP15
	.long	cr_alignment			@ r3
#else
M_CLASS(.long	exc_ret)			@ r3
AR_CLASS(.long	0)				@ r3 (0 = nothing to save)
#endif
	.size	__mmap_switched_data, . - __mmap_switched_data
150 | |
151 | __FINIT |
152 | .text |
153 | |
154 | /* |
155 | * This provides a C-API version of __lookup_processor_type |
156 | */ |
ENTRY(lookup_processor_type)
	@ C ABI wrapper: r4-r6 and r9 are clobbered by the helper but must
	@ be preserved for a C caller, so save them across the call.
	stmfd	sp!, {r4 - r6, r9, lr}
	mov	r9, r0				@ helper expects cpuid in r9
	bl	__lookup_processor_type
	mov	r0, r5				@ return proc_info pointer (0 if unknown)
	ldmfd	sp!, {r4 - r6, r9, pc}		@ restore and return
ENDPROC(lookup_processor_type)
164 | |
165 | /* |
166 | * Read processor ID register (CP#15, CR0), and look up in the linker-built |
167 | * supported processor list. Note that we can't use the absolute addresses |
168 | * for the __proc_info lists since we aren't running with the MMU on |
169 | * (and therefore, we are not in the correct address space). We have to |
170 | * calculate the offset. |
171 | * |
172 | * r9 = cpuid |
173 | * Returns: |
174 | * r3, r4, r6 corrupted |
175 | * r5 = proc_info pointer in physical address space |
176 | * r9 = cpuid (preserved) |
177 | */ |
__lookup_processor_type:
	/*
	 * Look in <asm/procinfo.h> for information about the __proc_info
	 * structure.  adr_l computes the table bounds PC-relatively, so
	 * this works before the MMU is on (physical addresses).
	 */
	adr_l	r5, __proc_info_begin		@ r5 = current entry
	adr_l	r6, __proc_info_end		@ r6 = one past last entry
1:	ldmia	r5, {r3, r4}			@ value, mask
	and	r4, r4, r9			@ mask wanted bits of cpuid
	teq	r3, r4				@ entry matches this CPU?
	beq	2f
	add	r5, r5, #PROC_INFO_SZ		@ sizeof(proc_info_list)
	cmp	r5, r6				@ end of table?
	blo	1b
	mov	r5, #0				@ unknown processor
2:	ret	lr
ENDPROC(__lookup_processor_type)
195 | |
/*
 * Fatal: an LPAE-configured kernel was booted on a CPU without LPAE.
 * Print a message when the low-level debug console is available,
 * then fall into the __error spin loop.
 */
__error_lpae:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_lpae
	bl	printascii
	b	__error
str_lpae: .asciz "\nError: Kernel with LPAE support, but CPU does not support LPAE.\n"
#else
	b	__error
#endif
	.align					@ realign after the .asciz string
ENDPROC(__error_lpae)
207 | |
/*
 * Fatal: no matching proc_info entry for this CPU.  With DEBUG_LL,
 * print the offending CPU ID (in r9) before spinning in __error.
 */
__error_p:
#ifdef CONFIG_DEBUG_LL
	adr	r0, str_p1
	bl	printascii
	mov	r0, r9				@ r0 = cpuid to print
	bl	printhex8
	adr	r0, str_p2
	bl	printascii
	b	__error
str_p1:	.asciz	"\nError: unrecognized/unsupported processor variant (0x"
str_p2:	.asciz	").\n"
	.align					@ realign after the .asciz strings
#endif
ENDPROC(__error_p)
222 | |
/*
 * Terminal error handler: optionally flag the failure visually
 * (RiscPC), then loop forever.  We may not even know what CPU we
 * are on, so nothing safer can be done here.
 */
__error:
#ifdef CONFIG_ARCH_RPC
/*
 * Turn the screen red on a error - RiscPC only.
 */
	mov	r0, #0x02000000			@ VRAM base on RiscPC
	mov	r3, #0x11
	orr	r3, r3, r3, lsl #8		@ replicate palette byte...
	orr	r3, r3, r3, lsl #16		@ ...across the full word
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
	str	r3, [r0], #4
#endif
1:	mov	r0, r0				@ architectural nop
	b	1b				@ spin forever
ENDPROC(__error)
240 | |