1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2012 Regents of the University of California |
4 | * Copyright (C) 2017 SiFive |
5 | */ |
6 | |
7 | #define GENERATING_ASM_OFFSETS |
8 | |
9 | #include <linux/kbuild.h> |
10 | #include <linux/mm.h> |
11 | #include <linux/sched.h> |
12 | #include <linux/suspend.h> |
13 | #include <asm/kvm_host.h> |
14 | #include <asm/thread_info.h> |
15 | #include <asm/ptrace.h> |
16 | #include <asm/cpu_ops_sbi.h> |
17 | #include <asm/stacktrace.h> |
18 | #include <asm/suspend.h> |
19 | |
/* Prototype kept locally (there is no header for this file) so the
 * definition below does not trigger -Wmissing-prototypes. */
void asm_offsets(void);

/*
 * asm_offsets() is never called at runtime.  It is compiled only so the
 * OFFSET()/DEFINE() macros from <linux/kbuild.h> emit marker directives
 * in the generated assembly, which Kbuild post-processes into
 * asm-offsets.h.  That header makes C struct member offsets and sizes
 * visible to assembly code.  The symbol names and member paths below are
 * therefore ABI for the arch's .S files — do not rename or reorder
 * members here without updating the assembly users.
 */
void asm_offsets(void)
{
	/* Callee-saved register save area in struct thread_struct
	 * (ra/sp/s0..s11), as stored per-task in task_struct. */
	OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
	OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
	OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
	OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
	OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
	OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
	OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
	OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
	OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
	OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
	OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
	OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
	OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
	OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
	/* thread_info lives inside task_struct (THREAD_INFO_IN_TASK). */
	OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
	OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
	OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
	OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
#ifdef CONFIG_SHADOW_CALL_STACK
	OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
#endif

	OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
	/* FPU register file (f0..f31 + fcsr) within thread.fstate. */
	OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
	OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
	OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
	OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]);
	OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]);
	OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]);
	OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]);
	OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]);
	OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]);
	OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]);
	OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
	OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
	OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
	OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
	OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
	OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
	OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
	OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
	OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
	OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
	OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
	OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
	OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
	OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
	OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
	OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
	OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
	OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
	OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
	OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
	OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
	OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
	OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
#ifdef CONFIG_STACKPROTECTOR
	OFFSET(TSK_STACK_CANARY, task_struct, stack_canary);
#endif

	/* struct pt_regs layout: size plus one offset per saved GPR and
	 * trap CSR, for exception entry/exit assembly. */
	DEFINE(PT_SIZE, sizeof(struct pt_regs));
	OFFSET(PT_EPC, pt_regs, epc);
	OFFSET(PT_RA, pt_regs, ra);
	/* PT_FP aliases PT_S0: s0 doubles as the frame pointer. */
	OFFSET(PT_FP, pt_regs, s0);
	OFFSET(PT_S0, pt_regs, s0);
	OFFSET(PT_S1, pt_regs, s1);
	OFFSET(PT_S2, pt_regs, s2);
	OFFSET(PT_S3, pt_regs, s3);
	OFFSET(PT_S4, pt_regs, s4);
	OFFSET(PT_S5, pt_regs, s5);
	OFFSET(PT_S6, pt_regs, s6);
	OFFSET(PT_S7, pt_regs, s7);
	OFFSET(PT_S8, pt_regs, s8);
	OFFSET(PT_S9, pt_regs, s9);
	OFFSET(PT_S10, pt_regs, s10);
	OFFSET(PT_S11, pt_regs, s11);
	OFFSET(PT_SP, pt_regs, sp);
	OFFSET(PT_TP, pt_regs, tp);
	OFFSET(PT_A0, pt_regs, a0);
	OFFSET(PT_A1, pt_regs, a1);
	OFFSET(PT_A2, pt_regs, a2);
	OFFSET(PT_A3, pt_regs, a3);
	OFFSET(PT_A4, pt_regs, a4);
	OFFSET(PT_A5, pt_regs, a5);
	OFFSET(PT_A6, pt_regs, a6);
	OFFSET(PT_A7, pt_regs, a7);
	OFFSET(PT_T0, pt_regs, t0);
	OFFSET(PT_T1, pt_regs, t1);
	OFFSET(PT_T2, pt_regs, t2);
	OFFSET(PT_T3, pt_regs, t3);
	OFFSET(PT_T4, pt_regs, t4);
	OFFSET(PT_T5, pt_regs, t5);
	OFFSET(PT_T6, pt_regs, t6);
	OFFSET(PT_GP, pt_regs, gp);
	OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
	OFFSET(PT_STATUS, pt_regs, status);
	OFFSET(PT_BADADDR, pt_regs, badaddr);
	OFFSET(PT_CAUSE, pt_regs, cause);

	/* Saved-register area inside struct suspend_context (asm/suspend.h). */
	OFFSET(SUSPEND_CONTEXT_REGS, suspend_context, regs);

	/* struct pbe fields walked by the hibernation restore assembly. */
	OFFSET(HIBERN_PBE_ADDR, pbe, address);
	OFFSET(HIBERN_PBE_ORIG, pbe, orig_address);
	OFFSET(HIBERN_PBE_NEXT, pbe, next);

	/* KVM guest CPU context (guest_context) inside kvm_vcpu_arch,
	 * saved/restored by the world-switch assembly. */
	OFFSET(KVM_ARCH_GUEST_ZERO, kvm_vcpu_arch, guest_context.zero);
	OFFSET(KVM_ARCH_GUEST_RA, kvm_vcpu_arch, guest_context.ra);
	OFFSET(KVM_ARCH_GUEST_SP, kvm_vcpu_arch, guest_context.sp);
	OFFSET(KVM_ARCH_GUEST_GP, kvm_vcpu_arch, guest_context.gp);
	OFFSET(KVM_ARCH_GUEST_TP, kvm_vcpu_arch, guest_context.tp);
	OFFSET(KVM_ARCH_GUEST_T0, kvm_vcpu_arch, guest_context.t0);
	OFFSET(KVM_ARCH_GUEST_T1, kvm_vcpu_arch, guest_context.t1);
	OFFSET(KVM_ARCH_GUEST_T2, kvm_vcpu_arch, guest_context.t2);
	OFFSET(KVM_ARCH_GUEST_S0, kvm_vcpu_arch, guest_context.s0);
	OFFSET(KVM_ARCH_GUEST_S1, kvm_vcpu_arch, guest_context.s1);
	OFFSET(KVM_ARCH_GUEST_A0, kvm_vcpu_arch, guest_context.a0);
	OFFSET(KVM_ARCH_GUEST_A1, kvm_vcpu_arch, guest_context.a1);
	OFFSET(KVM_ARCH_GUEST_A2, kvm_vcpu_arch, guest_context.a2);
	OFFSET(KVM_ARCH_GUEST_A3, kvm_vcpu_arch, guest_context.a3);
	OFFSET(KVM_ARCH_GUEST_A4, kvm_vcpu_arch, guest_context.a4);
	OFFSET(KVM_ARCH_GUEST_A5, kvm_vcpu_arch, guest_context.a5);
	OFFSET(KVM_ARCH_GUEST_A6, kvm_vcpu_arch, guest_context.a6);
	OFFSET(KVM_ARCH_GUEST_A7, kvm_vcpu_arch, guest_context.a7);
	OFFSET(KVM_ARCH_GUEST_S2, kvm_vcpu_arch, guest_context.s2);
	OFFSET(KVM_ARCH_GUEST_S3, kvm_vcpu_arch, guest_context.s3);
	OFFSET(KVM_ARCH_GUEST_S4, kvm_vcpu_arch, guest_context.s4);
	OFFSET(KVM_ARCH_GUEST_S5, kvm_vcpu_arch, guest_context.s5);
	OFFSET(KVM_ARCH_GUEST_S6, kvm_vcpu_arch, guest_context.s6);
	OFFSET(KVM_ARCH_GUEST_S7, kvm_vcpu_arch, guest_context.s7);
	OFFSET(KVM_ARCH_GUEST_S8, kvm_vcpu_arch, guest_context.s8);
	OFFSET(KVM_ARCH_GUEST_S9, kvm_vcpu_arch, guest_context.s9);
	OFFSET(KVM_ARCH_GUEST_S10, kvm_vcpu_arch, guest_context.s10);
	OFFSET(KVM_ARCH_GUEST_S11, kvm_vcpu_arch, guest_context.s11);
	OFFSET(KVM_ARCH_GUEST_T3, kvm_vcpu_arch, guest_context.t3);
	OFFSET(KVM_ARCH_GUEST_T4, kvm_vcpu_arch, guest_context.t4);
	OFFSET(KVM_ARCH_GUEST_T5, kvm_vcpu_arch, guest_context.t5);
	OFFSET(KVM_ARCH_GUEST_T6, kvm_vcpu_arch, guest_context.t6);
	OFFSET(KVM_ARCH_GUEST_SEPC, kvm_vcpu_arch, guest_context.sepc);
	OFFSET(KVM_ARCH_GUEST_SSTATUS, kvm_vcpu_arch, guest_context.sstatus);
	OFFSET(KVM_ARCH_GUEST_HSTATUS, kvm_vcpu_arch, guest_context.hstatus);
	OFFSET(KVM_ARCH_GUEST_SCOUNTEREN, kvm_vcpu_arch, guest_csr.scounteren);

	/* Matching host CPU context (host_context) inside kvm_vcpu_arch. */
	OFFSET(KVM_ARCH_HOST_ZERO, kvm_vcpu_arch, host_context.zero);
	OFFSET(KVM_ARCH_HOST_RA, kvm_vcpu_arch, host_context.ra);
	OFFSET(KVM_ARCH_HOST_SP, kvm_vcpu_arch, host_context.sp);
	OFFSET(KVM_ARCH_HOST_GP, kvm_vcpu_arch, host_context.gp);
	OFFSET(KVM_ARCH_HOST_TP, kvm_vcpu_arch, host_context.tp);
	OFFSET(KVM_ARCH_HOST_T0, kvm_vcpu_arch, host_context.t0);
	OFFSET(KVM_ARCH_HOST_T1, kvm_vcpu_arch, host_context.t1);
	OFFSET(KVM_ARCH_HOST_T2, kvm_vcpu_arch, host_context.t2);
	OFFSET(KVM_ARCH_HOST_S0, kvm_vcpu_arch, host_context.s0);
	OFFSET(KVM_ARCH_HOST_S1, kvm_vcpu_arch, host_context.s1);
	OFFSET(KVM_ARCH_HOST_A0, kvm_vcpu_arch, host_context.a0);
	OFFSET(KVM_ARCH_HOST_A1, kvm_vcpu_arch, host_context.a1);
	OFFSET(KVM_ARCH_HOST_A2, kvm_vcpu_arch, host_context.a2);
	OFFSET(KVM_ARCH_HOST_A3, kvm_vcpu_arch, host_context.a3);
	OFFSET(KVM_ARCH_HOST_A4, kvm_vcpu_arch, host_context.a4);
	OFFSET(KVM_ARCH_HOST_A5, kvm_vcpu_arch, host_context.a5);
	OFFSET(KVM_ARCH_HOST_A6, kvm_vcpu_arch, host_context.a6);
	OFFSET(KVM_ARCH_HOST_A7, kvm_vcpu_arch, host_context.a7);
	OFFSET(KVM_ARCH_HOST_S2, kvm_vcpu_arch, host_context.s2);
	OFFSET(KVM_ARCH_HOST_S3, kvm_vcpu_arch, host_context.s3);
	OFFSET(KVM_ARCH_HOST_S4, kvm_vcpu_arch, host_context.s4);
	OFFSET(KVM_ARCH_HOST_S5, kvm_vcpu_arch, host_context.s5);
	OFFSET(KVM_ARCH_HOST_S6, kvm_vcpu_arch, host_context.s6);
	OFFSET(KVM_ARCH_HOST_S7, kvm_vcpu_arch, host_context.s7);
	OFFSET(KVM_ARCH_HOST_S8, kvm_vcpu_arch, host_context.s8);
	OFFSET(KVM_ARCH_HOST_S9, kvm_vcpu_arch, host_context.s9);
	OFFSET(KVM_ARCH_HOST_S10, kvm_vcpu_arch, host_context.s10);
	OFFSET(KVM_ARCH_HOST_S11, kvm_vcpu_arch, host_context.s11);
	OFFSET(KVM_ARCH_HOST_T3, kvm_vcpu_arch, host_context.t3);
	OFFSET(KVM_ARCH_HOST_T4, kvm_vcpu_arch, host_context.t4);
	OFFSET(KVM_ARCH_HOST_T5, kvm_vcpu_arch, host_context.t5);
	OFFSET(KVM_ARCH_HOST_T6, kvm_vcpu_arch, host_context.t6);
	OFFSET(KVM_ARCH_HOST_SEPC, kvm_vcpu_arch, host_context.sepc);
	OFFSET(KVM_ARCH_HOST_SSTATUS, kvm_vcpu_arch, host_context.sstatus);
	OFFSET(KVM_ARCH_HOST_HSTATUS, kvm_vcpu_arch, host_context.hstatus);
	OFFSET(KVM_ARCH_HOST_SSCRATCH, kvm_vcpu_arch, host_sscratch);
	OFFSET(KVM_ARCH_HOST_STVEC, kvm_vcpu_arch, host_stvec);
	OFFSET(KVM_ARCH_HOST_SCOUNTEREN, kvm_vcpu_arch, host_scounteren);

	/* Trap details captured into struct kvm_cpu_trap on guest exit. */
	OFFSET(KVM_ARCH_TRAP_SEPC, kvm_cpu_trap, sepc);
	OFFSET(KVM_ARCH_TRAP_SCAUSE, kvm_cpu_trap, scause);
	OFFSET(KVM_ARCH_TRAP_STVAL, kvm_cpu_trap, stval);
	OFFSET(KVM_ARCH_TRAP_HTVAL, kvm_cpu_trap, htval);
	OFFSET(KVM_ARCH_TRAP_HTINST, kvm_cpu_trap, htinst);

	/* F extension */
	/* KVM FP state, single-precision layout (fp.f) in kvm_cpu_context. */
	OFFSET(KVM_ARCH_FP_F_F0, kvm_cpu_context, fp.f.f[0]);
	OFFSET(KVM_ARCH_FP_F_F1, kvm_cpu_context, fp.f.f[1]);
	OFFSET(KVM_ARCH_FP_F_F2, kvm_cpu_context, fp.f.f[2]);
	OFFSET(KVM_ARCH_FP_F_F3, kvm_cpu_context, fp.f.f[3]);
	OFFSET(KVM_ARCH_FP_F_F4, kvm_cpu_context, fp.f.f[4]);
	OFFSET(KVM_ARCH_FP_F_F5, kvm_cpu_context, fp.f.f[5]);
	OFFSET(KVM_ARCH_FP_F_F6, kvm_cpu_context, fp.f.f[6]);
	OFFSET(KVM_ARCH_FP_F_F7, kvm_cpu_context, fp.f.f[7]);
	OFFSET(KVM_ARCH_FP_F_F8, kvm_cpu_context, fp.f.f[8]);
	OFFSET(KVM_ARCH_FP_F_F9, kvm_cpu_context, fp.f.f[9]);
	OFFSET(KVM_ARCH_FP_F_F10, kvm_cpu_context, fp.f.f[10]);
	OFFSET(KVM_ARCH_FP_F_F11, kvm_cpu_context, fp.f.f[11]);
	OFFSET(KVM_ARCH_FP_F_F12, kvm_cpu_context, fp.f.f[12]);
	OFFSET(KVM_ARCH_FP_F_F13, kvm_cpu_context, fp.f.f[13]);
	OFFSET(KVM_ARCH_FP_F_F14, kvm_cpu_context, fp.f.f[14]);
	OFFSET(KVM_ARCH_FP_F_F15, kvm_cpu_context, fp.f.f[15]);
	OFFSET(KVM_ARCH_FP_F_F16, kvm_cpu_context, fp.f.f[16]);
	OFFSET(KVM_ARCH_FP_F_F17, kvm_cpu_context, fp.f.f[17]);
	OFFSET(KVM_ARCH_FP_F_F18, kvm_cpu_context, fp.f.f[18]);
	OFFSET(KVM_ARCH_FP_F_F19, kvm_cpu_context, fp.f.f[19]);
	OFFSET(KVM_ARCH_FP_F_F20, kvm_cpu_context, fp.f.f[20]);
	OFFSET(KVM_ARCH_FP_F_F21, kvm_cpu_context, fp.f.f[21]);
	OFFSET(KVM_ARCH_FP_F_F22, kvm_cpu_context, fp.f.f[22]);
	OFFSET(KVM_ARCH_FP_F_F23, kvm_cpu_context, fp.f.f[23]);
	OFFSET(KVM_ARCH_FP_F_F24, kvm_cpu_context, fp.f.f[24]);
	OFFSET(KVM_ARCH_FP_F_F25, kvm_cpu_context, fp.f.f[25]);
	OFFSET(KVM_ARCH_FP_F_F26, kvm_cpu_context, fp.f.f[26]);
	OFFSET(KVM_ARCH_FP_F_F27, kvm_cpu_context, fp.f.f[27]);
	OFFSET(KVM_ARCH_FP_F_F28, kvm_cpu_context, fp.f.f[28]);
	OFFSET(KVM_ARCH_FP_F_F29, kvm_cpu_context, fp.f.f[29]);
	OFFSET(KVM_ARCH_FP_F_F30, kvm_cpu_context, fp.f.f[30]);
	OFFSET(KVM_ARCH_FP_F_F31, kvm_cpu_context, fp.f.f[31]);
	OFFSET(KVM_ARCH_FP_F_FCSR, kvm_cpu_context, fp.f.fcsr);

	/* D extension */
	/* KVM FP state, double-precision layout (fp.d) in kvm_cpu_context. */
	OFFSET(KVM_ARCH_FP_D_F0, kvm_cpu_context, fp.d.f[0]);
	OFFSET(KVM_ARCH_FP_D_F1, kvm_cpu_context, fp.d.f[1]);
	OFFSET(KVM_ARCH_FP_D_F2, kvm_cpu_context, fp.d.f[2]);
	OFFSET(KVM_ARCH_FP_D_F3, kvm_cpu_context, fp.d.f[3]);
	OFFSET(KVM_ARCH_FP_D_F4, kvm_cpu_context, fp.d.f[4]);
	OFFSET(KVM_ARCH_FP_D_F5, kvm_cpu_context, fp.d.f[5]);
	OFFSET(KVM_ARCH_FP_D_F6, kvm_cpu_context, fp.d.f[6]);
	OFFSET(KVM_ARCH_FP_D_F7, kvm_cpu_context, fp.d.f[7]);
	OFFSET(KVM_ARCH_FP_D_F8, kvm_cpu_context, fp.d.f[8]);
	OFFSET(KVM_ARCH_FP_D_F9, kvm_cpu_context, fp.d.f[9]);
	OFFSET(KVM_ARCH_FP_D_F10, kvm_cpu_context, fp.d.f[10]);
	OFFSET(KVM_ARCH_FP_D_F11, kvm_cpu_context, fp.d.f[11]);
	OFFSET(KVM_ARCH_FP_D_F12, kvm_cpu_context, fp.d.f[12]);
	OFFSET(KVM_ARCH_FP_D_F13, kvm_cpu_context, fp.d.f[13]);
	OFFSET(KVM_ARCH_FP_D_F14, kvm_cpu_context, fp.d.f[14]);
	OFFSET(KVM_ARCH_FP_D_F15, kvm_cpu_context, fp.d.f[15]);
	OFFSET(KVM_ARCH_FP_D_F16, kvm_cpu_context, fp.d.f[16]);
	OFFSET(KVM_ARCH_FP_D_F17, kvm_cpu_context, fp.d.f[17]);
	OFFSET(KVM_ARCH_FP_D_F18, kvm_cpu_context, fp.d.f[18]);
	OFFSET(KVM_ARCH_FP_D_F19, kvm_cpu_context, fp.d.f[19]);
	OFFSET(KVM_ARCH_FP_D_F20, kvm_cpu_context, fp.d.f[20]);
	OFFSET(KVM_ARCH_FP_D_F21, kvm_cpu_context, fp.d.f[21]);
	OFFSET(KVM_ARCH_FP_D_F22, kvm_cpu_context, fp.d.f[22]);
	OFFSET(KVM_ARCH_FP_D_F23, kvm_cpu_context, fp.d.f[23]);
	OFFSET(KVM_ARCH_FP_D_F24, kvm_cpu_context, fp.d.f[24]);
	OFFSET(KVM_ARCH_FP_D_F25, kvm_cpu_context, fp.d.f[25]);
	OFFSET(KVM_ARCH_FP_D_F26, kvm_cpu_context, fp.d.f[26]);
	OFFSET(KVM_ARCH_FP_D_F27, kvm_cpu_context, fp.d.f[27]);
	OFFSET(KVM_ARCH_FP_D_F28, kvm_cpu_context, fp.d.f[28]);
	OFFSET(KVM_ARCH_FP_D_F29, kvm_cpu_context, fp.d.f[29]);
	OFFSET(KVM_ARCH_FP_D_F30, kvm_cpu_context, fp.d.f[30]);
	OFFSET(KVM_ARCH_FP_D_F31, kvm_cpu_context, fp.d.f[31]);
	OFFSET(KVM_ARCH_FP_D_FCSR, kvm_cpu_context, fp.d.fcsr);

	/*
	 * THREAD_{F,X}* might be larger than a S-type offset can handle, but
	 * these are used in performance-sensitive assembly so we can't resort
	 * to loading the long immediate every time.
	 */
	/* Offsets relative to thread.ra: assembly keeps a base pointer at
	 * thread.ra and addresses the other members as base + delta, so
	 * each delta below fits a short immediate. */
	DEFINE(TASK_THREAD_RA_RA,
		  offsetof(struct task_struct, thread.ra)
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_SP_RA,
		  offsetof(struct task_struct, thread.sp)
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S0_RA,
		  offsetof(struct task_struct, thread.s[0])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S1_RA,
		  offsetof(struct task_struct, thread.s[1])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S2_RA,
		  offsetof(struct task_struct, thread.s[2])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S3_RA,
		  offsetof(struct task_struct, thread.s[3])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S4_RA,
		  offsetof(struct task_struct, thread.s[4])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S5_RA,
		  offsetof(struct task_struct, thread.s[5])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S6_RA,
		  offsetof(struct task_struct, thread.s[6])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S7_RA,
		  offsetof(struct task_struct, thread.s[7])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S8_RA,
		  offsetof(struct task_struct, thread.s[8])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S9_RA,
		  offsetof(struct task_struct, thread.s[9])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S10_RA,
		  offsetof(struct task_struct, thread.s[10])
		- offsetof(struct task_struct, thread.ra)
	);
	DEFINE(TASK_THREAD_S11_RA,
		  offsetof(struct task_struct, thread.s[11])
		- offsetof(struct task_struct, thread.ra)
	);

	/* Same relative-addressing trick for FP state: deltas from f[0]. */
	DEFINE(TASK_THREAD_F0_F0,
		  offsetof(struct task_struct, thread.fstate.f[0])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F1_F0,
		  offsetof(struct task_struct, thread.fstate.f[1])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F2_F0,
		  offsetof(struct task_struct, thread.fstate.f[2])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F3_F0,
		  offsetof(struct task_struct, thread.fstate.f[3])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F4_F0,
		  offsetof(struct task_struct, thread.fstate.f[4])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F5_F0,
		  offsetof(struct task_struct, thread.fstate.f[5])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F6_F0,
		  offsetof(struct task_struct, thread.fstate.f[6])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F7_F0,
		  offsetof(struct task_struct, thread.fstate.f[7])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F8_F0,
		  offsetof(struct task_struct, thread.fstate.f[8])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F9_F0,
		  offsetof(struct task_struct, thread.fstate.f[9])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F10_F0,
		  offsetof(struct task_struct, thread.fstate.f[10])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F11_F0,
		  offsetof(struct task_struct, thread.fstate.f[11])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F12_F0,
		  offsetof(struct task_struct, thread.fstate.f[12])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F13_F0,
		  offsetof(struct task_struct, thread.fstate.f[13])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F14_F0,
		  offsetof(struct task_struct, thread.fstate.f[14])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F15_F0,
		  offsetof(struct task_struct, thread.fstate.f[15])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F16_F0,
		  offsetof(struct task_struct, thread.fstate.f[16])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F17_F0,
		  offsetof(struct task_struct, thread.fstate.f[17])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F18_F0,
		  offsetof(struct task_struct, thread.fstate.f[18])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F19_F0,
		  offsetof(struct task_struct, thread.fstate.f[19])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F20_F0,
		  offsetof(struct task_struct, thread.fstate.f[20])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F21_F0,
		  offsetof(struct task_struct, thread.fstate.f[21])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F22_F0,
		  offsetof(struct task_struct, thread.fstate.f[22])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F23_F0,
		  offsetof(struct task_struct, thread.fstate.f[23])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F24_F0,
		  offsetof(struct task_struct, thread.fstate.f[24])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F25_F0,
		  offsetof(struct task_struct, thread.fstate.f[25])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F26_F0,
		  offsetof(struct task_struct, thread.fstate.f[26])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F27_F0,
		  offsetof(struct task_struct, thread.fstate.f[27])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F28_F0,
		  offsetof(struct task_struct, thread.fstate.f[28])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F29_F0,
		  offsetof(struct task_struct, thread.fstate.f[29])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F30_F0,
		  offsetof(struct task_struct, thread.fstate.f[30])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_F31_F0,
		  offsetof(struct task_struct, thread.fstate.f[31])
		- offsetof(struct task_struct, thread.fstate.f[0])
	);
	DEFINE(TASK_THREAD_FCSR_F0,
		  offsetof(struct task_struct, thread.fstate.fcsr)
		- offsetof(struct task_struct, thread.fstate.f[0])
	);

	/*
	 * We allocate a pt_regs on the stack when entering the kernel. This
	 * ensures the alignment is sane.
	 */
	DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));

	/* Kernel mapping info and SBI HSM boot data consumed by head/boot asm. */
	OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
	OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
	OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);

	/* struct stackframe (fp/ra pair) used by the stack unwinder. */
	DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
	OFFSET(STACKFRAME_FP, stackframe, fp);
	OFFSET(STACKFRAME_RA, stackframe, ra);
}
492 | |