1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
2 | /* |
3 | * Based on arch/arm/mm/proc.S |
4 | * |
5 | * Copyright (C) 2001 Deep Blue Solutions Ltd. |
6 | * Copyright (C) 2012 ARM Ltd. |
7 | * Author: Catalin Marinas <catalin.marinas@arm.com> |
8 | */ |
9 | |
10 | #include <linux/init.h> |
11 | #include <linux/linkage.h> |
12 | #include <linux/pgtable.h> |
13 | #include <linux/cfi_types.h> |
14 | #include <asm/assembler.h> |
15 | #include <asm/asm-offsets.h> |
16 | #include <asm/asm_pointer_auth.h> |
17 | #include <asm/hwcap.h> |
18 | #include <asm/kernel-pgtable.h> |
19 | #include <asm/pgtable-hwdef.h> |
20 | #include <asm/cpufeature.h> |
21 | #include <asm/alternative.h> |
22 | #include <asm/smp.h> |
23 | #include <asm/sysreg.h> |
24 | |
#ifdef CONFIG_ARM64_64K_PAGES
#define TCR_TG_FLAGS	TCR_TG0_64K | TCR_TG1_64K
#elif defined(CONFIG_ARM64_16K_PAGES)
#define TCR_TG_FLAGS	TCR_TG0_16K | TCR_TG1_16K
#else /* CONFIG_ARM64_4K_PAGES */
#define TCR_TG_FLAGS	TCR_TG0_4K | TCR_TG1_4K
#endif

/*
 * With KASLR, set TCR_EL1.NFD1 so that non-fault (e.g. speculative or
 * SVE first/non-fault) EL0 accesses to the TTBR1 (kernel) VA range do
 * not perform translation table walks, limiting probes of the
 * randomized kernel layout.
 */
#ifdef CONFIG_RANDOMIZE_BASE
#define TCR_KASLR_FLAGS	TCR_NFD1
#else
#define TCR_KASLR_FLAGS	0
#endif

#define TCR_SMP_FLAGS	TCR_SHARED

/* PTWs cacheable, inner/outer WBWA */
#define TCR_CACHE_FLAGS	TCR_IRGN_WBWA | TCR_ORGN_WBWA

/*
 * Software tag-based KASAN keeps its tags in the top byte of kernel
 * pointers: enable top-byte-ignore for TTBR1 data accesses (TBI1),
 * leaving instruction fetches unaffected (TBID1).
 */
#ifdef CONFIG_KASAN_SW_TAGS
#define TCR_KASAN_SW_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_KASAN_SW_FLAGS	0
#endif

#ifdef CONFIG_KASAN_HW_TAGS
#define TCR_MTE_FLAGS	TCR_TCMA1 | TCR_TBI1 | TCR_TBID1
#elif defined(CONFIG_ARM64_MTE)
/*
 * The mte_zero_clear_page_tags() implementation uses DC GZVA, which relies on
 * TBI being enabled at EL1.
 */
#define TCR_MTE_FLAGS	TCR_TBI1 | TCR_TBID1
#else
#define TCR_MTE_FLAGS	0
#endif

/*
 * Default MAIR_EL1. MT_NORMAL_TAGGED is initially mapped as Normal memory and
 * changed during mte_cpu_setup to Normal Tagged if the system supports MTE.
 */
#define MAIR_EL1_SET							\
	(MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) |	\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) |		\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) |			\
	 MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED))
72 | |
73 | #ifdef CONFIG_CPU_PM |
74 | /** |
75 | * cpu_do_suspend - save CPU registers context |
76 | * |
77 | * x0: virtual address of context pointer |
78 | * |
79 | * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>. |
80 | */ |
81 | SYM_FUNC_START(cpu_do_suspend) |
82 | mrs x2, tpidr_el0 |
83 | mrs x3, tpidrro_el0 |
84 | mrs x4, contextidr_el1 |
85 | mrs x5, osdlr_el1 |
86 | mrs x6, cpacr_el1 |
87 | mrs x7, tcr_el1 |
88 | mrs x8, vbar_el1 |
89 | mrs x9, mdscr_el1 |
90 | mrs x10, oslsr_el1 |
91 | mrs x11, sctlr_el1 |
92 | get_this_cpu_offset x12 |
93 | mrs x13, sp_el0 |
94 | stp x2, x3, [x0] |
95 | stp x4, x5, [x0, #16] |
96 | stp x6, x7, [x0, #32] |
97 | stp x8, x9, [x0, #48] |
98 | stp x10, x11, [x0, #64] |
99 | stp x12, x13, [x0, #80] |
100 | /* |
101 | * Save x18 as it may be used as a platform register, e.g. by shadow |
102 | * call stack. |
103 | */ |
104 | str x18, [x0, #96] |
105 | ret |
106 | SYM_FUNC_END(cpu_do_suspend) |
107 | |
108 | /** |
109 | * cpu_do_resume - restore CPU register context |
110 | * |
111 | * x0: Address of context pointer |
112 | */ |
113 | SYM_FUNC_START(cpu_do_resume) |
114 | ldp x2, x3, [x0] |
115 | ldp x4, x5, [x0, #16] |
116 | ldp x6, x8, [x0, #32] |
117 | ldp x9, x10, [x0, #48] |
118 | ldp x11, x12, [x0, #64] |
119 | ldp x13, x14, [x0, #80] |
120 | /* |
121 | * Restore x18, as it may be used as a platform register, and clear |
122 | * the buffer to minimize the risk of exposure when used for shadow |
123 | * call stack. |
124 | */ |
125 | ldr x18, [x0, #96] |
126 | str xzr, [x0, #96] |
127 | msr tpidr_el0, x2 |
128 | msr tpidrro_el0, x3 |
129 | msr contextidr_el1, x4 |
130 | msr cpacr_el1, x6 |
131 | |
132 | /* Don't change t0sz here, mask those bits when restoring */ |
133 | mrs x7, tcr_el1 |
134 | bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH |
135 | |
136 | msr tcr_el1, x8 |
137 | msr vbar_el1, x9 |
138 | |
139 | /* |
140 | * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking |
141 | * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug |
142 | * exception. Mask them until local_daif_restore() in cpu_suspend() |
143 | * resets them. |
144 | */ |
145 | disable_daif |
146 | msr mdscr_el1, x10 |
147 | |
148 | msr sctlr_el1, x12 |
149 | set_this_cpu_offset x13 |
150 | msr sp_el0, x14 |
151 | /* |
152 | * Restore oslsr_el1 by writing oslar_el1 |
153 | */ |
154 | msr osdlr_el1, x5 |
155 | ubfx x11, x11, #1, #1 |
156 | msr oslar_el1, x11 |
157 | reset_pmuserenr_el0 x0 // Disable PMU access from EL0 |
158 | reset_amuserenr_el0 x0 // Disable AMU access from EL0 |
159 | |
160 | alternative_if ARM64_HAS_RAS_EXTN |
161 | msr_s SYS_DISR_EL1, xzr |
162 | alternative_else_nop_endif |
163 | |
164 | ptrauth_keys_install_kernel_nosync x14, x1, x2, x3 |
165 | isb |
166 | ret |
167 | SYM_FUNC_END(cpu_do_resume) |
168 | #endif |
169 | |
	.pushsection ".idmap.text", "a"

/*
 * Point TTBR1 at the reserved (empty) page tables and invalidate the
 * local TLB, so that no stale kernel mappings can be walked while the
 * caller installs a new set of tables. \tmp1 and \tmp2 are scratch.
 */
	.macro	__idmap_cpu_set_reserved_ttbr1, tmp1, tmp2
	adrp	\tmp1, reserved_pg_dir
	phys_to_ttbr \tmp2, \tmp1
	offset_ttbr1 \tmp2, \tmp1
	msr	ttbr1_el1, \tmp2
	isb
	tlbi	vmalle1				// local (this CPU) TLB only
	dsb	nsh
	isb
	.endm

/*
 * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1)
 *
 * This is the low-level counterpart to cpu_replace_ttbr1, and should not be
 * called by anything else. It can only be executed from a TTBR0 mapping.
 */
SYM_TYPED_FUNC_START(idmap_cpu_replace_ttbr1)
	__idmap_cpu_set_reserved_ttbr1 x1, x3

	offset_ttbr1 x0, x3
	msr	ttbr1_el1, x0			// install the new tables
	isb
200 | |
201 | #ifdef CONFIG_UNMAP_KERNEL_AT_EL0 |
202 | |
/* Attributes for the temporary fixmap PTEs: Normal WB, writable, AF, UXN */
#define KPTI_NG_PTE_FLAGS	(PTE_ATTRINDX(MT_NORMAL) | PTE_TYPE_PAGE | \
				 PTE_AF | PTE_SHARED | PTE_UXN | PTE_WRITE)

	.pushsection ".idmap.text", "a"

/*
 * Extract the output (physical) address of the table/page entry \pte
 * into \phys. With 52-bit PAs, the high address bits live in a separate
 * field and are shifted into place; \pte is clobbered in that case.
 */
	.macro	pte_to_phys, phys, pte
	and	\phys, \pte, #PTE_ADDR_LOW
#ifdef CONFIG_ARM64_PA_BITS_52
	and	\pte, \pte, #PTE_ADDR_HIGH
	orr	\phys, \phys, \pte, lsl #PTE_ADDR_HIGH_SHIFT
#endif
	.endm

/*
 * Walk \num_entries entries starting at cur_\type\()p and set the nG
 * bit on every valid entry that does not already have it. For table
 * levels (\type is not "pte"), an entry with bit 1 set (a table
 * descriptor) also branches out to .Lderef_\type — defined at the
 * invocation site — to process the next level; that code branches back
 * to .Lnext_\type when done.
 */
	.macro kpti_mk_tbl_ng, type, num_entries
	add	end_\type\()p, cur_\type\()p, #\num_entries * 8
.Ldo_\type:
	ldr	\type, [cur_\type\()p], #8	// Load the entry and advance
	tbz	\type, #0, .Lnext_\type	// Skip invalid and
	tbnz	\type, #11, .Lnext_\type	// non-global entries
	orr	\type, \type, #PTE_NG		// Same bit for blocks and pages
	str	\type, [cur_\type\()p, #-8]	// Update the entry
	.ifnc	\type, pte
	tbnz	\type, #1, .Lderef_\type	// descend into next-level table
	.endif
.Lnext_\type:
	cmp	cur_\type\()p, end_\type\()p
	b.ne	.Ldo_\type
	.endm

/*
 * Dereference the current table entry and map it into the temporary
 * fixmap slot associated with the current level.
 *
 * Break-before-make: the old fixmap PTE (slot \level + 2 of the array
 * at temp_pte) is zapped and its stale translation invalidated locally
 * before the new mapping of the table at PA cur_\type\()p is written.
 * Assumes the caller arranged for slot i of the array to translate VA
 * temp_pte + i * PAGE_SIZE. On exit, cur_\type\()p holds that VA.
 */
	.macro kpti_map_pgtbl, type, level
	str	xzr, [temp_pte, #8 * (\level + 2)]	// break before make
	dsb	nshst
	add	pte, temp_pte, #PAGE_SIZE * (\level + 2)
	lsr	pte, pte, #12				// page number for TLBI by VA
	tlbi	vaae1, pte
	dsb	nsh
	isb

	phys_to_pte pte, cur_\type\()p
	add	cur_\type\()p, temp_pte, #PAGE_SIZE * (\level + 2)
	orr	pte, pte, pte_flags
	str	pte, [temp_pte, #8 * (\level + 2)]
	dsb	nshst
	.endm

/*
 * void __kpti_install_ng_mappings(int cpu, int num_secondaries, phys_addr_t temp_pgd,
 *				   unsigned long temp_pte_va)
 *
 * Called exactly once from stop_machine context by each CPU found during boot.
 */
/*
 * Rendezvous counter: starts at 1 to account for the boot CPU; each
 * secondary increments it, the boot CPU waits for it to reach the CPU
 * count, and finally writes 0 to release the secondaries.
 */
	.pushsection ".data", "aw", %progbits
SYM_DATA(__idmap_kpti_flag, .long 1)
	.popsection
261 | |
SYM_TYPED_FUNC_START(idmap_kpti_install_ng_mappings)
	/*
	 * Symbolic register names for the walk. Several names deliberately
	 * alias the same register (cpu/temp_pte = x0, num_cpus/pte_flags
	 * = x1): the first use of each pair ends before the second begins.
	 */
	cpu		.req	w0
	temp_pte	.req	x0
	num_cpus	.req	w1
	pte_flags	.req	x1
	temp_pgd_phys	.req	x2
	swapper_ttb	.req	x3
	flag_ptr	.req	x4
	cur_pgdp	.req	x5
	end_pgdp	.req	x6
	pgd		.req	x7
	cur_pudp	.req	x8
	end_pudp	.req	x9
	cur_pmdp	.req	x11
	end_pmdp	.req	x12
	cur_ptep	.req	x14
	end_ptep	.req	x15
	pte		.req	x16
	valid		.req	x17
	cur_p4dp	.req	x19
	end_p4dp	.req	x20

	mov	x5, x3				// preserve temp_pte arg
	mrs	swapper_ttb, ttbr1_el1
	adr_l	flag_ptr, __idmap_kpti_flag

	cbnz	cpu, __idmap_kpti_secondary

	/* x19/x20 are callee-saved; only the 5-level walk uses them */
#if CONFIG_PGTABLE_LEVELS > 4
	stp	x29, x30, [sp, #-32]!
	mov	x29, sp
	stp	x19, x20, [sp, #16]
#endif

	/* We're the boot CPU. Wait for the others to catch up */
	sevl
1:	wfe
	ldaxr	w17, [flag_ptr]
	eor	w17, w17, num_cpus		// proceed once flag == num_cpus
	cbnz	w17, 1b

	/* Switch to the temporary page tables on this CPU only */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	offset_ttbr1 temp_pgd_phys, x8
	msr	ttbr1_el1, temp_pgd_phys
	isb

	mov	temp_pte, x5			// retrieve the temp_pte arg saved above
	mov_q	pte_flags, KPTI_NG_PTE_FLAGS

	/* Everybody is enjoying the idmap, so we can rewrite swapper. */

#ifdef CONFIG_ARM64_LPA2
	/*
	 * If LPA2 support is configured, but 52-bit virtual addressing is not
	 * enabled at runtime, we will fall back to one level of paging less,
	 * and so we have to walk swapper_pg_dir as if we dereferenced its
	 * address from a PGD level entry, and terminate the PGD level loop
	 * right after.
	 */
	adrp	pgd, swapper_pg_dir		// walk &swapper_pg_dir at the next level
	mov	cur_pgdp, end_pgdp		// must be equal to terminate the PGD loop
alternative_if_not ARM64_HAS_VA52
	b	.Lderef_pgd			// skip to the next level
alternative_else_nop_endif
	/*
	 * LPA2 based 52-bit virtual addressing requires 52-bit physical
	 * addressing to be enabled as well. In this case, the shareability
	 * bits are repurposed as physical address bits, and should not be
	 * set in pte_flags.
	 */
	bic	pte_flags, pte_flags, #PTE_SHARED
#endif

	/* PGD */
	adrp	cur_pgdp, swapper_pg_dir
	kpti_map_pgtbl	pgd, -1
	kpti_mk_tbl_ng	pgd, PTRS_PER_PGD

	/* Ensure all the updated entries are visible to secondary CPUs */
	dsb	ishst

	/* We're done: fire up swapper_pg_dir again */
	__idmap_cpu_set_reserved_ttbr1 x8, x9
	msr	ttbr1_el1, swapper_ttb
	isb

	/* Set the flag to zero to indicate that we're all done */
	str	wzr, [flag_ptr]
#if CONFIG_PGTABLE_LEVELS > 4
	ldp	x19, x20, [sp, #16]
	ldp	x29, x30, [sp], #32
#endif
	ret

	/*
	 * Each .Lderef_<type> block below maps the next-level table that
	 * the current entry points to, walks it, and branches back into
	 * the parent level's loop (.Lnext_<type>). When a level is folded
	 * away at build time, its register name and exit label simply
	 * alias those of the level above.
	 */
.Lderef_pgd:
	/* P4D */
	.if		CONFIG_PGTABLE_LEVELS > 4
	p4d		.req	x30
	pte_to_phys	cur_p4dp, pgd
	kpti_map_pgtbl	p4d, 0
	kpti_mk_tbl_ng	p4d, PTRS_PER_P4D
	b	.Lnext_pgd
	.else		/* CONFIG_PGTABLE_LEVELS <= 4 */
	p4d		.req	pgd
	.set		.Lnext_p4d, .Lnext_pgd
	.endif

.Lderef_p4d:
	/* PUD */
	.if		CONFIG_PGTABLE_LEVELS > 3
	pud		.req	x10
	pte_to_phys	cur_pudp, p4d
	kpti_map_pgtbl	pud, 1
	kpti_mk_tbl_ng	pud, PTRS_PER_PUD
	b	.Lnext_p4d
	.else		/* CONFIG_PGTABLE_LEVELS <= 3 */
	pud		.req	pgd
	.set		.Lnext_pud, .Lnext_pgd
	.endif

.Lderef_pud:
	/* PMD */
	.if		CONFIG_PGTABLE_LEVELS > 2
	pmd		.req	x13
	pte_to_phys	cur_pmdp, pud
	kpti_map_pgtbl	pmd, 2
	kpti_mk_tbl_ng	pmd, PTRS_PER_PMD
	b	.Lnext_pud
	.else		/* CONFIG_PGTABLE_LEVELS <= 2 */
	pmd		.req	pgd
	.set		.Lnext_pmd, .Lnext_pgd
	.endif

.Lderef_pmd:
	/* PTE */
	pte_to_phys	cur_ptep, pmd
	kpti_map_pgtbl	pte, 3
	kpti_mk_tbl_ng	pte, PTRS_PER_PTE
	b	.Lnext_pmd

	.unreq	cpu
	.unreq	temp_pte
	.unreq	num_cpus
	.unreq	pte_flags
	.unreq	temp_pgd_phys
	.unreq	cur_pgdp
	.unreq	end_pgdp
	.unreq	pgd
	.unreq	cur_pudp
	.unreq	end_pudp
	.unreq	pud
	.unreq	cur_pmdp
	.unreq	end_pmdp
	.unreq	pmd
	.unreq	cur_ptep
	.unreq	end_ptep
	.unreq	pte
	.unreq	valid
	.unreq	cur_p4dp
	.unreq	end_p4dp
	.unreq	p4d

	/* Secondary CPUs end up here */
__idmap_kpti_secondary:
	/* Uninstall swapper before surgery begins */
	__idmap_cpu_set_reserved_ttbr1 x16, x17

	/* Increment the flag to let the boot CPU know we're ready */
1:	ldxr	w16, [flag_ptr]
	add	w16, w16, #1
	stxr	w17, w16, [flag_ptr]
	cbnz	w17, 1b				// retry on failed store-exclusive

	/* Wait for the boot CPU to finish messing around with swapper */
	sevl
1:	wfe
	ldxr	w16, [flag_ptr]
	cbnz	w16, 1b				// released when flag is cleared

	/* All done, act like nothing happened */
	msr	ttbr1_el1, swapper_ttb
	isb
	ret

	.unreq	swapper_ttb
	.unreq	flag_ptr
SYM_FUNC_END(idmap_kpti_install_ng_mappings)
	.popsection
450 | .popsection |
451 | #endif |
452 | |
453 | /* |
454 | * __cpu_setup |
455 | * |
456 | * Initialise the processor for turning the MMU on. |
457 | * |
458 | * Output: |
459 | * Return in x0 the value of the SCTLR_EL1 register. |
460 | */ |
461 | .pushsection ".idmap.text" , "a" |
462 | SYM_FUNC_START(__cpu_setup) |
463 | tlbi vmalle1 // Invalidate local TLB |
464 | dsb nsh |
465 | |
466 | msr cpacr_el1, xzr // Reset cpacr_el1 |
467 | mov x1, #1 << 12 // Reset mdscr_el1 and disable |
468 | msr mdscr_el1, x1 // access to the DCC from EL0 |
469 | isb // Unmask debug exceptions now, |
470 | enable_dbg // since this is per-cpu |
471 | reset_pmuserenr_el0 x1 // Disable PMU access from EL0 |
472 | reset_amuserenr_el0 x1 // Disable AMU access from EL0 |
473 | |
474 | /* |
475 | * Default values for VMSA control registers. These will be adjusted |
476 | * below depending on detected CPU features. |
477 | */ |
478 | mair .req x17 |
479 | tcr .req x16 |
480 | mov_q mair, MAIR_EL1_SET |
481 | mov_q tcr, TCR_T0SZ(IDMAP_VA_BITS) | TCR_T1SZ(VA_BITS_MIN) | TCR_CACHE_FLAGS | \ |
482 | TCR_SMP_FLAGS | TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ |
483 | TCR_TBI0 | TCR_A1 | TCR_KASAN_SW_FLAGS | TCR_MTE_FLAGS |
484 | |
485 | tcr_clear_errata_bits tcr, x9, x5 |
486 | |
487 | #ifdef CONFIG_ARM64_VA_BITS_52 |
488 | mov x9, #64 - VA_BITS |
489 | alternative_if ARM64_HAS_VA52 |
490 | tcr_set_t1sz tcr, x9 |
491 | #ifdef CONFIG_ARM64_LPA2 |
492 | orr tcr, tcr, #TCR_DS |
493 | #endif |
494 | alternative_else_nop_endif |
495 | #endif |
496 | |
497 | /* |
498 | * Set the IPS bits in TCR_EL1. |
499 | */ |
500 | tcr_compute_pa_size tcr, #TCR_IPS_SHIFT, x5, x6 |
501 | #ifdef CONFIG_ARM64_HW_AFDBM |
502 | /* |
503 | * Enable hardware update of the Access Flags bit. |
504 | * Hardware dirty bit management is enabled later, |
505 | * via capabilities. |
506 | */ |
507 | mrs x9, ID_AA64MMFR1_EL1 |
508 | and x9, x9, ID_AA64MMFR1_EL1_HAFDBS_MASK |
509 | cbz x9, 1f |
510 | orr tcr, tcr, #TCR_HA // hardware Access flag update |
511 | 1: |
512 | #endif /* CONFIG_ARM64_HW_AFDBM */ |
513 | msr mair_el1, mair |
514 | msr tcr_el1, tcr |
515 | |
516 | mrs_s x1, SYS_ID_AA64MMFR3_EL1 |
517 | ubfx x1, x1, #ID_AA64MMFR3_EL1_S1PIE_SHIFT, #4 |
518 | cbz x1, .Lskip_indirection |
519 | |
520 | /* |
521 | * The PROT_* macros describing the various memory types may resolve to |
522 | * C expressions if they include the PTE_MAYBE_* macros, and so they |
523 | * can only be used from C code. The PIE_E* constants below are also |
524 | * defined in terms of those macros, but will mask out those |
525 | * PTE_MAYBE_* constants, whether they are set or not. So #define them |
526 | * as 0x0 here so we can evaluate the PIE_E* constants in asm context. |
527 | */ |
528 | |
529 | #define PTE_MAYBE_NG 0 |
530 | #define PTE_MAYBE_SHARED 0 |
531 | |
532 | mov_q x0, PIE_E0 |
533 | msr REG_PIRE0_EL1, x0 |
534 | mov_q x0, PIE_E1 |
535 | msr REG_PIR_EL1, x0 |
536 | |
537 | #undef PTE_MAYBE_NG |
538 | #undef PTE_MAYBE_SHARED |
539 | |
540 | mov x0, TCR2_EL1x_PIE |
541 | msr REG_TCR2_EL1, x0 |
542 | |
543 | .Lskip_indirection: |
544 | |
545 | /* |
546 | * Prepare SCTLR |
547 | */ |
548 | mov_q x0, INIT_SCTLR_EL1_MMU_ON |
549 | ret // return to head.S |
550 | |
551 | .unreq mair |
552 | .unreq tcr |
553 | SYM_FUNC_END(__cpu_setup) |
554 | |