/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * kexec for arm64
 *
 * Copyright (C) Linaro.
 * Copyright (C) Huawei Futurewei Technologies.
 * Copyright (C) 2021, Microsoft Corporation.
 * Pasha Tatashin <pasha.tatashin@soleen.com>
 */

#include <linux/kexec.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kexec.h>
#include <asm/page.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

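/*
 * turn_off_mmu - disable the MMU at EL1 by resetting SCTLR_EL1 to its
 * MMU-off init value. pre_disable_mmu_workaround applies any CPU-erratum
 * mitigation needed before the MMU goes off, and the isb makes the new
 * SCTLR_EL1 value take effect before the next instruction.
 */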
.macro turn_off_mmu tmp1, tmp2
	mov_q	\tmp1, INIT_SCTLR_EL1_MMU_OFF
	pre_disable_mmu_workaround
	msr	sctlr_el1, \tmp1
	isb
.endm

.section ".kexec_relocate.text", "ax"
/*
 * arm64_relocate_new_kernel - Put a 2nd stage image in place and boot it.
 *
 * The memory that the old kernel occupies may be overwritten when copying the
 * new image to its final location. To ensure that the
 * arm64_relocate_new_kernel routine which does that copy is not overwritten,
 * all code and data needed by arm64_relocate_new_kernel must be between the
 * symbols arm64_relocate_new_kernel and arm64_relocate_new_kernel_end. The
 * machine_kexec() routine will copy arm64_relocate_new_kernel to the kexec
 * safe memory that has been set up to be preserved during the copy operation.
 */
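/*
 * On entry, x0 holds the address of the struct kimage, as passed in by
 * machine_kexec(). The entry list it describes is walked with the MMU
 * still enabled, through the copy of the linear map installed into TTBR1
 * below, so physical addresses from the list can be converted back to
 * virtual ones with phys_offset.
 */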
SYM_CODE_START(arm64_relocate_new_kernel)
	/*
	 * The kimage structure isn't allocated specially and may be clobbered
	 * during relocation. We must load any values we need from it prior to
	 * any relocation occurring.
	 */
	ldr	x28, [x0, #KIMAGE_START]	/* x28 = new kernel entry point */
	ldr	x27, [x0, #KIMAGE_ARCH_EL2_VECTORS] /* x27 = EL2 vectors, if any */
	ldr	x26, [x0, #KIMAGE_ARCH_DTB_MEM]	/* x26 = dtb address */

	/* Set up the list loop variables. */
	ldr	x18, [x0, #KIMAGE_ARCH_ZERO_PAGE] /* x18 = zero page for BBM */
	ldr	x17, [x0, #KIMAGE_ARCH_TTBR1]	/* x17 = linear map copy */
	ldr	x16, [x0, #KIMAGE_HEAD]		/* x16 = kimage_head */
	ldr	x22, [x0, #KIMAGE_ARCH_PHYS_OFFSET] /* x22 = phys_offset */
	raw_dcache_line_size x15, x1		/* x15 = dcache line size */
	break_before_make_ttbr_switch	x18, x17, x1, x2 /* set linear map */
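/*
 * break_before_make_ttbr_switch above pointed TTBR1 at a safe copy of
 * the linear map (going through the zero page so the switch observes
 * break-before-make), so the loads below keep working even while the
 * old kernel's memory is being overwritten.
 *
 * Each 64-bit entry in the kimage list is a page-aligned address tagged
 * with one of the IND_* flags from <linux/kexec.h>. Roughly, in C:
 *
 *	do {
 *		addr = entry & PAGE_MASK;
 *		if (entry & IND_SOURCE) {
 *			copy_page(dest, addr);
 *			dest += PAGE_SIZE;
 *		} else if (entry & IND_INDIRECTION)
 *			ptr = addr;
 *		else if (entry & IND_DESTINATION)
 *			dest = addr;
 *		entry = *ptr++;
 *	} while (!(entry & IND_DONE));
 */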
.Lloop:
	and	x12, x16, PAGE_MASK		/* x12 = addr */
	sub	x12, x12, x22			/* Convert x12 to virt */
	/* Test the entry flags. */
.Ltest_source:
	tbz	x16, IND_SOURCE_BIT, .Ltest_indirection

	/* Copy the source page, then clean+invalidate dest to PoC. */
	mov	x19, x13			/* x19 = original dest */
	copy_page x13, x12, x1, x2, x3, x4, x5, x6, x7, x8
	add	x1, x19, #PAGE_SIZE
	dcache_by_myline_op civac, sy, x19, x1, x15, x20
	b	.Lnext
.Ltest_indirection:
	tbz	x16, IND_INDIRECTION_BIT, .Ltest_destination
	mov	x14, x12			/* ptr = addr */
	b	.Lnext
.Ltest_destination:
	tbz	x16, IND_DESTINATION_BIT, .Lnext
	mov	x13, x12			/* dest = addr */
.Lnext:
	ldr	x16, [x14], #8			/* entry = *ptr++ */
	tbz	x16, IND_DONE_BIT, .Lloop	/* while (!(entry & DONE)) */
	/*
	 * Wait for the writes from copy_page to finish, then invalidate
	 * the whole I-cache so stale instructions from the old kernel
	 * cannot be fetched once we branch into the new image.
	 */
	dsb	nsh
	ic	iallu
	dsb	nsh
	isb
	turn_off_mmu x12, x13			/* Turn off MMU */

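/*
 * If machine_kexec() installed EL2 vectors (x27 != 0), the new kernel
 * must be started at EL2: the hvc below traps to those vectors, whose
 * HVC_SOFT_RESTART handler branches to the entry point in x1, moving
 * the arguments in x2-x4 into place. Otherwise we branch to the new
 * kernel directly from EL1. Either way the image is entered as the
 * arm64 boot protocol requires: DTB address in x0, x1-x3 zeroed.
 */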
	/* Start new image. */
	cbz	x27, .Lel1
	mov	x1, x28				/* kernel entry point */
	mov	x2, x26				/* dtb address */
	mov	x3, xzr
	mov	x4, xzr
	mov	x0, #HVC_SOFT_RESTART
	hvc	#0				/* Jumps from el2 */
.Lel1:
	mov	x0, x26				/* dtb address */
	mov	x1, xzr
	mov	x2, xzr
	mov	x3, xzr
	br	x28				/* Jumps from el1 */
SYM_CODE_END(arm64_relocate_new_kernel)

Source: linux/arch/arm64/kernel/relocate_kernel.S