//===-- EmulateInstructionARM64.cpp ---------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "EmulateInstructionARM64.h"

#include "lldb/Core/Address.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Symbol/UnwindPlan.h"
#include "lldb/Utility/ArchSpec.h"
#include "lldb/Utility/RegisterValue.h"
#include "lldb/Utility/Stream.h"

#include "llvm/Support/CheckedArithmetic.h"

#include "Plugins/Process/Utility/ARMDefines.h"
#include "Plugins/Process/Utility/ARMUtils.h"
#include "Plugins/Process/Utility/lldb-arm64-register-enums.h"

#include <algorithm>
#include <cstdlib>
#include <optional>

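// Macro definitions required by RegisterInfos_arm64.h below; they parameterize
// the static g_register_infos_arm64_le table that the emulator consults when
// looking up RegisterInfo entries.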
#define GPR_OFFSET(idx) ((idx)*8)
#define GPR_OFFSET_NAME(reg) 0
#define FPU_OFFSET(idx) ((idx)*16)
#define FPU_OFFSET_NAME(reg) 0
#define EXC_OFFSET_NAME(reg) 0
#define DBG_OFFSET_NAME(reg) 0
#define DEFINE_DBG(re, y)                                                      \
  "na", nullptr, 8, 0, lldb::eEncodingUint, lldb::eFormatHex,                  \
      {LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM,          \
       LLDB_INVALID_REGNUM, LLDB_INVALID_REGNUM},                              \
      nullptr, nullptr, nullptr

#define DECLARE_REGISTER_INFOS_ARM64_STRUCT

#include "Plugins/Process/Utility/RegisterInfos_arm64.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/MathExtras.h"

#include "Plugins/Process/Utility/InstructionUtils.h"

using namespace lldb;
using namespace lldb_private;

LLDB_PLUGIN_DEFINE_ADV(EmulateInstructionARM64, InstructionARM64)

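// Look up an LLDB register number in the static arm64 register info table,
// returning std::nullopt if the number is out of range.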
static std::optional<RegisterInfo> LLDBTableGetRegisterInfo(uint32_t reg_num) {
  if (reg_num >= std::size(g_register_infos_arm64_le))
    return {};
  return g_register_infos_arm64_le[reg_num];
}

#define No_VFP 0
#define VFPv1 (1u << 1)
#define VFPv2 (1u << 2)
#define VFPv3 (1u << 3)
#define AdvancedSIMD (1u << 4)

#define VFPv1_ABOVE (VFPv1 | VFPv2 | VFPv3 | AdvancedSIMD)
#define VFPv2_ABOVE (VFPv2 | VFPv3 | AdvancedSIMD)
#define VFPv2v3 (VFPv2 | VFPv3)

#define UInt(x) ((uint64_t)x)
#define SInt(x) ((int64_t)x)
#define bit bool
#define boolean bool
#define integer int64_t

static inline bool IsZero(uint64_t x) { return x == 0; }

static inline uint64_t NOT(uint64_t x) { return ~x; }

// LSL()
// =====

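// Logical shift left of a 64-bit value; a shift amount of zero returns x
// unchanged.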
static inline uint64_t LSL(uint64_t x, integer shift) {
  if (shift == 0)
    return x;
  return x << shift;
}

// ConstrainUnpredictable()
// ========================

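// Decide how a CONSTRAINED UNPREDICTABLE case (such as an LDP/STP whose
// transfer registers overlap the writeback base register) should be treated.
// At present every case falls through to Constraint_UNKNOWN.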
EmulateInstructionARM64::ConstraintType
ConstrainUnpredictable(EmulateInstructionARM64::Unpredictable which) {
  EmulateInstructionARM64::ConstraintType result =
      EmulateInstructionARM64::Constraint_UNKNOWN;
  switch (which) {
  case EmulateInstructionARM64::Unpredictable_WBOVERLAP:
  case EmulateInstructionARM64::Unpredictable_LDPOVERLAP:
    // TODO: don't know what to really do here? Pseudo code says:
    // set result to one of above Constraint behaviours or UNDEFINED
    break;
  }
  return result;
}

//
// EmulateInstructionARM64 implementation
//

void EmulateInstructionARM64::Initialize() {
  PluginManager::RegisterPlugin(GetPluginNameStatic(),
                                GetPluginDescriptionStatic(), CreateInstance);
}

void EmulateInstructionARM64::Terminate() {
  PluginManager::UnregisterPlugin(CreateInstance);
}

llvm::StringRef EmulateInstructionARM64::GetPluginDescriptionStatic() {
  return "Emulate instructions for the ARM64 architecture.";
}

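// Create an emulator instance only for aarch64/aarch64_32 triples and only
// when the requested instruction type is supported by this plugin.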
EmulateInstruction *
EmulateInstructionARM64::CreateInstance(const ArchSpec &arch,
                                        InstructionType inst_type) {
  if (EmulateInstructionARM64::SupportsEmulatingInstructionsOfTypeStatic(
          inst_type)) {
    if (arch.GetTriple().getArch() == llvm::Triple::aarch64 ||
        arch.GetTriple().getArch() == llvm::Triple::aarch64_32) {
      return new EmulateInstructionARM64(arch);
    }
  }

  return nullptr;
}

bool EmulateInstructionARM64::SetTargetTriple(const ArchSpec &arch) {
  if (arch.GetTriple().getArch() == llvm::Triple::aarch64)
    return true;
  else if (arch.GetTriple().getArch() == llvm::Triple::aarch64_32)
    return true;

  return false;
}

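// Translate generic register numbers (PC, SP, FP, RA, FLAGS) into LLDB arm64
// register numbers, then look the result up in the static register info table.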
std::optional<RegisterInfo>
EmulateInstructionARM64::GetRegisterInfo(RegisterKind reg_kind,
                                         uint32_t reg_num) {
  if (reg_kind == eRegisterKindGeneric) {
    switch (reg_num) {
    case LLDB_REGNUM_GENERIC_PC:
      reg_kind = eRegisterKindLLDB;
      reg_num = gpr_pc_arm64;
      break;
    case LLDB_REGNUM_GENERIC_SP:
      reg_kind = eRegisterKindLLDB;
      reg_num = gpr_sp_arm64;
      break;
    case LLDB_REGNUM_GENERIC_FP:
      reg_kind = eRegisterKindLLDB;
      reg_num = gpr_fp_arm64;
      break;
    case LLDB_REGNUM_GENERIC_RA:
      reg_kind = eRegisterKindLLDB;
      reg_num = gpr_lr_arm64;
      break;
    case LLDB_REGNUM_GENERIC_FLAGS:
      reg_kind = eRegisterKindLLDB;
      reg_num = gpr_cpsr_arm64;
      break;

    default:
      return {};
    }
  }

  if (reg_kind == eRegisterKindLLDB)
    return LLDBTableGetRegisterInfo(reg_num);
  return {};
}

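// Find the table entry that describes an opcode: an entry matches when
// (opcode & entry.mask) == entry.value. Returns nullptr for instructions this
// emulator does not handle.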
EmulateInstructionARM64::Opcode *
EmulateInstructionARM64::GetOpcodeForInstruction(const uint32_t opcode) {
  static EmulateInstructionARM64::Opcode g_opcodes[] = {
      // Prologue instructions

      // push register(s)
      {0xff000000, 0xd1000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "SUB <Xd|SP>, <Xn|SP>, #<imm> {, <shift>}"},
      {0xff000000, 0xf1000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "SUBS <Xd>, <Xn|SP>, #<imm> {, <shift>}"},
      {0xff000000, 0x91000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "ADD <Xd|SP>, <Xn|SP>, #<imm> {, <shift>}"},
      {0xff000000, 0xb1000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "ADDS <Xd>, <Xn|SP>, #<imm> {, <shift>}"},

      {0xff000000, 0x51000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "SUB <Wd|WSP>, <Wn|WSP>, #<imm> {, <shift>}"},
      {0xff000000, 0x71000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "SUBS <Wd>, <Wn|WSP>, #<imm> {, <shift>}"},
      {0xff000000, 0x11000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "ADD <Wd|WSP>, <Wn|WSP>, #<imm> {, <shift>}"},
      {0xff000000, 0x31000000, No_VFP,
       &EmulateInstructionARM64::EmulateADDSUBImm,
       "ADDS <Wd>, <Wn|WSP>, #<imm> {, <shift>}"},

      {0xffc00000, 0x29000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "STP <Wt>, <Wt2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0xa9000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "STP <Xt>, <Xt2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0x2d000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "STP <St>, <St2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0x6d000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "STP <Dt>, <Dt2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0xad000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "STP <Qt>, <Qt2>, [<Xn|SP>{, #<imm>}]"},

      {0xffc00000, 0x29800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "STP <Wt>, <Wt2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0xa9800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "STP <Xt>, <Xt2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0x2d800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "STP <St>, <St2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0x6d800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "STP <Dt>, <Dt2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0xad800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "STP <Qt>, <Qt2>, [<Xn|SP>, #<imm>]!"},

      {0xffc00000, 0x28800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "STP <Wt>, <Wt2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0xa8800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "STP <Xt>, <Xt2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0x2c800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "STP <St>, <St2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0x6c800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "STP <Dt>, <Dt2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0xac800000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "STP <Qt>, <Qt2>, [<Xn|SP>], #<imm>"},

      {0xffc00000, 0x29400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "LDP <Wt>, <Wt2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0xa9400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "LDP <Xt>, <Xt2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0x2d400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "LDP <St>, <St2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0x6d400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "LDP <Dt>, <Dt2>, [<Xn|SP>{, #<imm>}]"},
      {0xffc00000, 0xad400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_OFF>,
       "LDP <Qt>, <Qt2>, [<Xn|SP>{, #<imm>}]"},

      {0xffc00000, 0x29c00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "LDP <Wt>, <Wt2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0xa9c00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "LDP <Xt>, <Xt2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0x2dc00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "LDP <St>, <St2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0x6dc00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "LDP <Dt>, <Dt2>, [<Xn|SP>, #<imm>]!"},
      {0xffc00000, 0xadc00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_PRE>,
       "LDP <Qt>, <Qt2>, [<Xn|SP>, #<imm>]!"},

      {0xffc00000, 0x28c00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "LDP <Wt>, <Wt2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0xa8c00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "LDP <Xt>, <Xt2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0x2cc00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "LDP <St>, <St2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0x6cc00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "LDP <Dt>, <Dt2>, [<Xn|SP>], #<imm>"},
      {0xffc00000, 0xacc00000, No_VFP,
       &EmulateInstructionARM64::EmulateLDPSTP<AddrMode_POST>,
       "LDP <Qt>, <Qt2>, [<Xn|SP>], #<imm>"},

      {0xffe00c00, 0xb8000400, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_POST>,
       "STR <Wt>, [<Xn|SP>], #<simm>"},
      {0xffe00c00, 0xf8000400, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_POST>,
       "STR <Xt>, [<Xn|SP>], #<simm>"},
      {0xffe00c00, 0xb8000c00, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_PRE>,
       "STR <Wt>, [<Xn|SP>, #<simm>]!"},
      {0xffe00c00, 0xf8000c00, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_PRE>,
       "STR <Xt>, [<Xn|SP>, #<simm>]!"},
      {0xffc00000, 0xb9000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_OFF>,
       "STR <Wt>, [<Xn|SP>{, #<pimm>}]"},
      {0xffc00000, 0xf9000000, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_OFF>,
       "STR <Xt>, [<Xn|SP>{, #<pimm>}]"},

      {0xffe00c00, 0xb8400400, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_POST>,
       "LDR <Wt>, [<Xn|SP>], #<simm>"},
      {0xffe00c00, 0xf8400400, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_POST>,
       "LDR <Xt>, [<Xn|SP>], #<simm>"},
      {0xffe00c00, 0xb8400c00, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_PRE>,
       "LDR <Wt>, [<Xn|SP>, #<simm>]!"},
      {0xffe00c00, 0xf8400c00, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_PRE>,
       "LDR <Xt>, [<Xn|SP>, #<simm>]!"},
      {0xffc00000, 0xb9400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_OFF>,
       "LDR <Wt>, [<Xn|SP>{, #<pimm>}]"},
      {0xffc00000, 0xf9400000, No_VFP,
       &EmulateInstructionARM64::EmulateLDRSTRImm<AddrMode_OFF>,
       "LDR <Xt>, [<Xn|SP>{, #<pimm>}]"},

      {0xfc000000, 0x14000000, No_VFP, &EmulateInstructionARM64::EmulateB,
       "B <label>"},
      {0xff000010, 0x54000000, No_VFP, &EmulateInstructionARM64::EmulateBcond,
       "B.<cond> <label>"},
      {0x7f000000, 0x34000000, No_VFP, &EmulateInstructionARM64::EmulateCBZ,
       "CBZ <Wt>, <label>"},
      {0x7f000000, 0x35000000, No_VFP, &EmulateInstructionARM64::EmulateCBZ,
       "CBNZ <Wt>, <label>"},
      {0x7f000000, 0x36000000, No_VFP, &EmulateInstructionARM64::EmulateTBZ,
       "TBZ <R><t>, #<imm>, <label>"},
      {0x7f000000, 0x37000000, No_VFP, &EmulateInstructionARM64::EmulateTBZ,
       "TBNZ <R><t>, #<imm>, <label>"},

  };
  static const size_t k_num_arm_opcodes = std::size(g_opcodes);

  for (size_t i = 0; i < k_num_arm_opcodes; ++i) {
    if ((g_opcodes[i].mask & opcode) == g_opcodes[i].value)
      return &g_opcodes[i];
  }
  return nullptr;
}

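// Read the 4-byte opcode at the current PC into m_opcode, recording the
// address in m_addr (or LLDB_INVALID_ADDRESS on failure).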
bool EmulateInstructionARM64::ReadInstruction() {
  bool success = false;
  m_addr = ReadRegisterUnsigned(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_PC,
                                LLDB_INVALID_ADDRESS, &success);
  if (success) {
    Context read_inst_context;
    read_inst_context.type = eContextReadOpcode;
    read_inst_context.SetNoArgs();
    m_opcode.SetOpcode32(
        ReadMemoryUnsigned(read_inst_context, m_addr, 4, 0, &success),
        GetByteOrder());
  }
  if (!success)
    m_addr = LLDB_INVALID_ADDRESS;
  return success;
}

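// Look up the opcode and dispatch to its Emulate* callback. When the caller
// asks for auto-advance, the PC is bumped by four afterwards if the callback
// did not change it (i.e. the instruction was not a taken branch).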
bool EmulateInstructionARM64::EvaluateInstruction(uint32_t evaluate_options) {
  const uint32_t opcode = m_opcode.GetOpcode32();
  Opcode *opcode_data = GetOpcodeForInstruction(opcode);
  if (opcode_data == nullptr)
    return false;

  const bool auto_advance_pc =
      evaluate_options & eEmulateInstructionOptionAutoAdvancePC;
  m_ignore_conditions =
      evaluate_options & eEmulateInstructionOptionIgnoreConditions;

  bool success = false;

  // Only return false if we are unable to read the CPSR and we care about
  // conditions.
  if (!success && !m_ignore_conditions)
    return false;

  uint64_t orig_pc_value = 0;
  if (auto_advance_pc) {
    orig_pc_value =
        ReadRegisterUnsigned(eRegisterKindLLDB, gpr_pc_arm64, 0, &success);
    if (!success)
      return false;
  }

  // Call the Emulate... function.
  success = (this->*opcode_data->callback)(opcode);
  if (!success)
    return false;

  if (auto_advance_pc) {
    uint64_t new_pc_value =
        ReadRegisterUnsigned(eRegisterKindLLDB, gpr_pc_arm64, 0, &success);
    if (!success)
      return false;

    if (new_pc_value == orig_pc_value) {
      EmulateInstruction::Context context;
      context.type = eContextAdvancePC;
      context.SetNoArgs();
      if (!WriteRegisterUnsigned(context, eRegisterKindLLDB, gpr_pc_arm64,
                                 orig_pc_value + 4))
        return false;
    }
  }
  return true;
}

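// Describe the unwind state at function entry: the CFA is the incoming stack
// pointer with no offset and the return address lives in LR (x30).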
bool EmulateInstructionARM64::CreateFunctionEntryUnwind(
    UnwindPlan &unwind_plan) {
  unwind_plan.Clear();
  unwind_plan.SetRegisterKind(eRegisterKindLLDB);

  UnwindPlan::RowSP row(new UnwindPlan::Row);

  // Our previous Call Frame Address is the stack pointer
  row->GetCFAValue().SetIsRegisterPlusOffset(gpr_sp_arm64, 0);

  unwind_plan.AppendRow(row);
  unwind_plan.SetSourceName("EmulateInstructionARM64");
  unwind_plan.SetSourcedFromCompiler(eLazyBoolNo);
  unwind_plan.SetUnwindPlanValidAtAllInstructions(eLazyBoolYes);
  unwind_plan.SetUnwindPlanForSignalTrap(eLazyBoolNo);
  unwind_plan.SetReturnAddressRegister(gpr_lr_arm64);
  return true;
}

uint32_t EmulateInstructionARM64::GetFramePointerRegisterNumber() const {
  if (m_arch.GetTriple().isAndroid())
    return LLDB_INVALID_REGNUM; // Don't use frame pointer on android

  return gpr_fp_arm64;
}

bool EmulateInstructionARM64::UsingAArch32() {
  bool aarch32 = m_opcode_pstate.RW == 1;
  // if !HaveAnyAArch32() then assert !aarch32;
  // if HighestELUsingAArch32() then assert aarch32;
  return aarch32;
}

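// Write a branch target to the PC. N must match the current execution state
// (32 for AArch32, 64 for AArch64); tag bits in a tagged target are currently
// left untouched (see the TODO below).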
bool EmulateInstructionARM64::BranchTo(const Context &context, uint32_t N,
                                       addr_t target) {
#if 0
    // Set program counter to a new address, with a branch reason hint for
    // possible use by hardware fetching the next instruction.
    BranchTo(bits(N) target, BranchType branch_type)
        Hint_Branch(branch_type);
        if N == 32 then
            assert UsingAArch32();
            _PC = ZeroExtend(target);
        else
            assert N == 64 && !UsingAArch32();
            // Remove the tag bits from a tagged target
            case PSTATE.EL of
                when EL0, EL1
                    if target<55> == '1' && TCR_EL1.TBI1 == '1' then
                        target<63:56> = '11111111';
                    if target<55> == '0' && TCR_EL1.TBI0 == '1' then
                        target<63:56> = '00000000';
                when EL2
                    if TCR_EL2.TBI == '1' then
                        target<63:56> = '00000000';
                when EL3
                    if TCR_EL3.TBI == '1' then
                        target<63:56> = '00000000';
        _PC = target<63:0>;
        return;
#endif

  addr_t addr;

  // Hint_Branch(branch_type);
  if (N == 32) {
    if (!UsingAArch32())
      return false;
    addr = target;
  } else if (N == 64) {
    if (UsingAArch32())
      return false;
    // TODO: Remove the tag bits from a tagged target
    addr = target;
  } else
    return false;

  return WriteRegisterUnsigned(context, eRegisterKindGeneric,
                               LLDB_REGNUM_GENERIC_PC, addr);
}

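// Evaluate an AArch64 condition code against the NZCV bits in m_opcode_pstate:
// bits 3:1 of the condition select the base test and bit 0 inverts it; the
// 0b111x encodings always evaluate to true, as does everything when conditions
// are being ignored.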
bool EmulateInstructionARM64::ConditionHolds(const uint32_t cond) {
  // If we are ignoring conditions, then always return true. This allows us to
  // iterate over disassembly code and still emulate an instruction even if we
  // don't have all the right bits set in the CPSR register...
  if (m_ignore_conditions)
    return true;

  bool result = false;
  switch (UnsignedBits(cond, 3, 1)) {
  case 0:
    result = (m_opcode_pstate.Z == 1);
    break;
  case 1:
    result = (m_opcode_pstate.C == 1);
    break;
  case 2:
    result = (m_opcode_pstate.N == 1);
    break;
  case 3:
    result = (m_opcode_pstate.V == 1);
    break;
  case 4:
    result = (m_opcode_pstate.C == 1 && m_opcode_pstate.Z == 0);
    break;
  case 5:
    result = (m_opcode_pstate.N == m_opcode_pstate.V);
    break;
  case 6:
    result = (m_opcode_pstate.N == m_opcode_pstate.V && m_opcode_pstate.Z == 0);
    break;
  case 7:
    // Always execute (cond == 0b1110, or the special 0b1111 which gives some
    // opcodes different meanings but always means execution happens).
    return true;
  }

  if (cond & 1)
    result = !result;
  return result;
}

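// ARM pseudocode AddWithCarry: return the low N bits of x + y + carry_in and
// fill proc_state with the resulting NZCV flags (C from unsigned overflow, V
// from signed overflow).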
uint64_t EmulateInstructionARM64::
AddWithCarry(uint32_t N, uint64_t x, uint64_t y, bit carry_in,
             EmulateInstructionARM64::ProcState &proc_state) {
  uint64_t unsigned_sum = UInt(x) + UInt(y) + UInt(carry_in);
  std::optional<int64_t> signed_sum = llvm::checkedAdd(SInt(x), SInt(y));
  bool overflow = !signed_sum;
  if (!overflow)
    overflow |= !llvm::checkedAdd(*signed_sum, SInt(carry_in));
  uint64_t result = unsigned_sum;
  if (N < 64)
    result = Bits64(result, N - 1, 0);
  proc_state.N = Bit64(result, N - 1);
  proc_state.Z = IsZero(result);
  proc_state.C = UInt(result) != unsigned_sum;
  proc_state.V = overflow;
  return result;
}

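// Emulate ADD/SUB (immediate), including the SP-relative forms used in
// prologues and epilogues ("sub sp, sp, #imm", "add sp, sp, #imm",
// "mov fp, sp", ...); the context type records how the stack or frame
// pointer was affected.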
bool EmulateInstructionARM64::EmulateADDSUBImm(const uint32_t opcode) {
  // integer d = UInt(Rd);
  // integer n = UInt(Rn);
  // integer datasize = if sf == 1 then 64 else 32;
  // boolean sub_op = (op == 1);
  // boolean setflags = (S == 1);
  // bits(datasize) imm;
  //
  // case shift of
  //     when '00' imm = ZeroExtend(imm12, datasize);
  //     when '01' imm = ZeroExtend(imm12 : Zeros(12), datasize);
  //     when '1x' UNDEFINED;
  //
  //
  // bits(datasize) result;
  // bits(datasize) operand1 = if n == 31 then SP[] else X[n];
  // bits(datasize) operand2 = imm;
  // bits(4) nzcv;
  // bit carry_in;
  //
  // if sub_op then
  //     operand2 = NOT(operand2);
  //     carry_in = 1;
  // else
  //     carry_in = 0;
  //
  // (result, nzcv) = AddWithCarry(operand1, operand2, carry_in);
  //
  // if setflags then
  //     PSTATE.NZCV = nzcv;
  //
  // if d == 31 && !setflags then
  //     SP[] = result;
  // else
  //     X[d] = result;

  const uint32_t sf = Bit32(opcode, 31);
  const uint32_t op = Bit32(opcode, 30);
  const uint32_t S = Bit32(opcode, 29);
  const uint32_t shift = Bits32(opcode, 23, 22);
  const uint32_t imm12 = Bits32(opcode, 21, 10);
  const uint32_t Rn = Bits32(opcode, 9, 5);
  const uint32_t Rd = Bits32(opcode, 4, 0);

  bool success = false;

  const uint32_t d = UInt(Rd);
  const uint32_t n = UInt(Rn);
  const uint32_t datasize = (sf == 1) ? 64 : 32;
  boolean sub_op = op == 1;
  boolean setflags = S == 1;
  uint64_t imm;

  switch (shift) {
  case 0:
    imm = imm12;
    break;
  case 1:
    imm = static_cast<uint64_t>(imm12) << 12;
    break;
  default:
    return false; // UNDEFINED;
  }
  uint64_t result;
  uint64_t operand1 =
      ReadRegisterUnsigned(eRegisterKindLLDB, gpr_x0_arm64 + n, 0, &success);
  uint64_t operand2 = imm;
  bit carry_in;

  if (sub_op) {
    operand2 = NOT(operand2);
    carry_in = true;
    imm = -imm; // For the Register plus offset context below
  } else {
    carry_in = false;
  }

  ProcState proc_state;

  result = AddWithCarry(datasize, operand1, operand2, carry_in, proc_state);

  if (setflags) {
    m_emulated_pstate.N = proc_state.N;
    m_emulated_pstate.Z = proc_state.Z;
    m_emulated_pstate.C = proc_state.C;
    m_emulated_pstate.V = proc_state.V;
  }

  Context context;
  std::optional<RegisterInfo> reg_info_Rn =
      GetRegisterInfo(eRegisterKindLLDB, n);
  if (reg_info_Rn)
    context.SetRegisterPlusOffset(*reg_info_Rn, imm);

  if (n == GetFramePointerRegisterNumber() && d == gpr_sp_arm64 && !setflags) {
    // 'mov sp, fp' - common epilogue instruction, CFA is now in terms of the
    // stack pointer, instead of frame pointer.
    context.type = EmulateInstruction::eContextRestoreStackPointer;
  } else if ((n == gpr_sp_arm64 || n == GetFramePointerRegisterNumber()) &&
             d == gpr_sp_arm64 && !setflags) {
    context.type = EmulateInstruction::eContextAdjustStackPointer;
  } else if (d == GetFramePointerRegisterNumber() && n == gpr_sp_arm64 &&
             !setflags) {
    context.type = EmulateInstruction::eContextSetFramePointer;
  } else {
    context.type = EmulateInstruction::eContextImmediate;
  }

  // If setflags && d == gpr_sp_arm64 then d = WZR/XZR. See CMN, CMP
  if (!setflags || d != gpr_sp_arm64)
    WriteRegisterUnsigned(context, eRegisterKindLLDB, gpr_x0_arm64 + d, result);

  return true;
}

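// Emulate LDP/STP register pairs for the offset, pre-index and post-index
// addressing modes selected by the a_mode template parameter, covering both
// general-purpose and SIMD&FP register pairs, with writeback of the base
// register when required.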
template <EmulateInstructionARM64::AddrMode a_mode>
bool EmulateInstructionARM64::EmulateLDPSTP(const uint32_t opcode) {
  uint32_t opc = Bits32(opcode, 31, 30);
  uint32_t V = Bit32(opcode, 26);
  uint32_t L = Bit32(opcode, 22);
  uint32_t imm7 = Bits32(opcode, 21, 15);
  uint32_t Rt2 = Bits32(opcode, 14, 10);
  uint32_t Rn = Bits32(opcode, 9, 5);
  uint32_t Rt = Bits32(opcode, 4, 0);

  integer n = UInt(Rn);
  integer t = UInt(Rt);
  integer t2 = UInt(Rt2);
  uint64_t idx;

  MemOp memop = L == 1 ? MemOp_LOAD : MemOp_STORE;
  boolean vector = (V == 1);
  // AccType acctype = AccType_NORMAL;
  boolean is_signed = false;
  boolean wback = a_mode != AddrMode_OFF;
  boolean wb_unknown = false;
  boolean rt_unknown = false;
  integer scale;
  integer size;

  if (opc == 3)
    return false; // UNDEFINED

  if (vector) {
    scale = 2 + UInt(opc);
  } else {
    scale = (opc & 2) ? 3 : 2;
    is_signed = (opc & 1) != 0;
    if (is_signed && memop == MemOp_STORE)
      return false; // UNDEFINED
  }

  if (!vector && wback && ((t == n) || (t2 == n))) {
    switch (ConstrainUnpredictable(Unpredictable_WBOVERLAP)) {
    case Constraint_UNKNOWN:
      wb_unknown = true; // writeback is UNKNOWN
      break;

    case Constraint_SUPPRESSWB:
      wback = false; // writeback is suppressed
      break;

    case Constraint_NOP:
      memop = MemOp_NOP; // do nothing
      wback = false;
      break;

    case Constraint_NONE:
      break;
    }
  }

  if (memop == MemOp_LOAD && t == t2) {
    switch (ConstrainUnpredictable(Unpredictable_LDPOVERLAP)) {
    case Constraint_UNKNOWN:
      rt_unknown = true; // result is UNKNOWN
      break;

    case Constraint_NOP:
      memop = MemOp_NOP; // do nothing
      wback = false;
      break;

    default:
      break;
    }
  }

  idx = LSL(llvm::SignExtend64<7>(imm7), scale);
  size = (integer)1 << scale;
  uint64_t datasize = size * 8;
  uint64_t address;
  uint64_t wb_address;

  std::optional<RegisterInfo> reg_info_base =
      GetRegisterInfo(eRegisterKindLLDB, gpr_x0_arm64 + n);
  if (!reg_info_base)
    return false;

  std::optional<RegisterInfo> reg_info_Rt;
  std::optional<RegisterInfo> reg_info_Rt2;

  if (vector) {
    reg_info_Rt = GetRegisterInfo(eRegisterKindLLDB, fpu_d0_arm64 + t);
    reg_info_Rt2 = GetRegisterInfo(eRegisterKindLLDB, fpu_d0_arm64 + t2);
  } else {
    reg_info_Rt = GetRegisterInfo(eRegisterKindLLDB, gpr_x0_arm64 + t);
    reg_info_Rt2 = GetRegisterInfo(eRegisterKindLLDB, gpr_x0_arm64 + t2);
  }

  if (!reg_info_Rt || !reg_info_Rt2)
    return false;

  bool success = false;
  if (n == 31) {
    // CheckSPAlignment();
    address =
        ReadRegisterUnsigned(eRegisterKindLLDB, gpr_sp_arm64, 0, &success);
  } else
    address =
        ReadRegisterUnsigned(eRegisterKindLLDB, gpr_x0_arm64 + n, 0, &success);

  wb_address = address + idx;
  if (a_mode != AddrMode_POST)
    address = wb_address;

  Context context_t;
  Context context_t2;

  RegisterValue::BytesContainer buffer;
  Status error;

  switch (memop) {
  case MemOp_STORE: {
    if (n == 31 || n == GetFramePointerRegisterNumber()) // if this store is
                                                         // based off of the sp
                                                         // or fp register
    {
      context_t.type = eContextPushRegisterOnStack;
      context_t2.type = eContextPushRegisterOnStack;
    } else {
      context_t.type = eContextRegisterStore;
      context_t2.type = eContextRegisterStore;
    }
    context_t.SetRegisterToRegisterPlusOffset(*reg_info_Rt, *reg_info_base, 0);
    context_t2.SetRegisterToRegisterPlusOffset(*reg_info_Rt2, *reg_info_base,
                                               size);

    std::optional<RegisterValue> data_Rt = ReadRegister(*reg_info_Rt);
    if (!data_Rt)
      return false;

    buffer.resize(reg_info_Rt->byte_size);
    if (data_Rt->GetAsMemoryData(*reg_info_Rt, buffer.data(),
                                 reg_info_Rt->byte_size, eByteOrderLittle,
                                 error) == 0)
      return false;

    if (!WriteMemory(context_t, address + 0, buffer.data(),
                     reg_info_Rt->byte_size))
      return false;

    std::optional<RegisterValue> data_Rt2 = ReadRegister(*reg_info_Rt2);
    if (!data_Rt2)
      return false;

    buffer.resize(reg_info_Rt2->byte_size);
    if (data_Rt2->GetAsMemoryData(*reg_info_Rt2, buffer.data(),
                                  reg_info_Rt2->byte_size, eByteOrderLittle,
                                  error) == 0)
      return false;

    if (!WriteMemory(context_t2, address + size, buffer.data(),
                     reg_info_Rt2->byte_size))
      return false;
  } break;

  case MemOp_LOAD: {
    if (n == 31 || n == GetFramePointerRegisterNumber()) // if this load is
                                                         // based off of the sp
                                                         // or fp register
    {
      context_t.type = eContextPopRegisterOffStack;
      context_t2.type = eContextPopRegisterOffStack;
    } else {
      context_t.type = eContextRegisterLoad;
      context_t2.type = eContextRegisterLoad;
    }
    context_t.SetAddress(address);
    context_t2.SetAddress(address + size);

    buffer.resize(reg_info_Rt->byte_size);
    if (rt_unknown)
      std::fill(buffer.begin(), buffer.end(), 'U');
    else {
      if (!ReadMemory(context_t, address, buffer.data(),
                      reg_info_Rt->byte_size))
        return false;
    }

    RegisterValue data_Rt;
    if (data_Rt.SetFromMemoryData(*reg_info_Rt, buffer.data(),
                                  reg_info_Rt->byte_size, eByteOrderLittle,
                                  error) == 0)
      return false;

    if (!vector && is_signed && !data_Rt.SignExtend(datasize))
      return false;

    if (!WriteRegister(context_t, *reg_info_Rt, data_Rt))
      return false;

    buffer.resize(reg_info_Rt2->byte_size);
    if (!rt_unknown)
      if (!ReadMemory(context_t2, address + size, buffer.data(),
                      reg_info_Rt2->byte_size))
        return false;

    RegisterValue data_Rt2;
    if (data_Rt2.SetFromMemoryData(*reg_info_Rt2, buffer.data(),
                                   reg_info_Rt2->byte_size, eByteOrderLittle,
                                   error) == 0)
      return false;

    if (!vector && is_signed && !data_Rt2.SignExtend(datasize))
      return false;

    if (!WriteRegister(context_t2, *reg_info_Rt2, data_Rt2))
      return false;
  } break;

  default:
    break;
  }

  if (wback) {
    if (wb_unknown)
      wb_address = LLDB_INVALID_ADDRESS;
    Context context;
    context.SetImmediateSigned(idx);
    if (n == 31)
      context.type = eContextAdjustStackPointer;
    else
      context.type = eContextAdjustBaseRegister;
    WriteRegisterUnsigned(context, *reg_info_base, wb_address);
  }
  return true;
}

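// Emulate LDR/STR (immediate) of a single W or X register in post-index,
// pre-index and unsigned-offset forms, again with optional writeback of the
// base register.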
template <EmulateInstructionARM64::AddrMode a_mode>
bool EmulateInstructionARM64::EmulateLDRSTRImm(const uint32_t opcode) {
  uint32_t size = Bits32(opcode, 31, 30);
  uint32_t opc = Bits32(opcode, 23, 22);
  uint32_t n = Bits32(opcode, 9, 5);
  uint32_t t = Bits32(opcode, 4, 0);

  bool wback;
  bool postindex;
  uint64_t offset;

  switch (a_mode) {
  case AddrMode_POST:
    wback = true;
    postindex = true;
    offset = llvm::SignExtend64<9>(Bits32(opcode, 20, 12));
    break;
  case AddrMode_PRE:
    wback = true;
    postindex = false;
    offset = llvm::SignExtend64<9>(Bits32(opcode, 20, 12));
    break;
  case AddrMode_OFF:
    wback = false;
    postindex = false;
    offset = LSL(Bits32(opcode, 21, 10), size);
    break;
  }

  MemOp memop;

  if (Bit32(opc, 1) == 0) {
    memop = Bit32(opc, 0) == 1 ? MemOp_LOAD : MemOp_STORE;
  } else {
    memop = MemOp_LOAD;
    if (size == 2 && Bit32(opc, 0) == 1)
      return false;
  }

  Status error;
  bool success = false;
  uint64_t address;
  RegisterValue::BytesContainer buffer;

  if (n == 31)
    address =
        ReadRegisterUnsigned(eRegisterKindLLDB, gpr_sp_arm64, 0, &success);
  else
    address =
        ReadRegisterUnsigned(eRegisterKindLLDB, gpr_x0_arm64 + n, 0, &success);

  if (!success)
    return false;

  if (!postindex)
    address += offset;

  std::optional<RegisterInfo> reg_info_base =
      GetRegisterInfo(eRegisterKindLLDB, gpr_x0_arm64 + n);
  if (!reg_info_base)
    return false;

  std::optional<RegisterInfo> reg_info_Rt =
      GetRegisterInfo(eRegisterKindLLDB, gpr_x0_arm64 + t);
  if (!reg_info_Rt)
    return false;

  Context context;
  switch (memop) {
  case MemOp_STORE: {
    if (n == 31 || n == GetFramePointerRegisterNumber()) // if this store is
                                                         // based off of the sp
                                                         // or fp register
      context.type = eContextPushRegisterOnStack;
    else
      context.type = eContextRegisterStore;
    context.SetRegisterToRegisterPlusOffset(*reg_info_Rt, *reg_info_base,
                                            postindex ? 0 : offset);

    std::optional<RegisterValue> data_Rt = ReadRegister(*reg_info_Rt);
    if (!data_Rt)
      return false;

    buffer.resize(reg_info_Rt->byte_size);
    if (data_Rt->GetAsMemoryData(*reg_info_Rt, buffer.data(),
                                 reg_info_Rt->byte_size, eByteOrderLittle,
                                 error) == 0)
      return false;

    if (!WriteMemory(context, address, buffer.data(), reg_info_Rt->byte_size))
      return false;
  } break;

  case MemOp_LOAD: {
    if (n == 31 || n == GetFramePointerRegisterNumber()) // if this load is
                                                         // based off of the sp
                                                         // or fp register
      context.type = eContextPopRegisterOffStack;
    else
      context.type = eContextRegisterLoad;
    context.SetAddress(address);

    buffer.resize(reg_info_Rt->byte_size);
    if (!ReadMemory(context, address, buffer.data(), reg_info_Rt->byte_size))
      return false;

    RegisterValue data_Rt;
    if (data_Rt.SetFromMemoryData(*reg_info_Rt, buffer.data(),
                                  reg_info_Rt->byte_size, eByteOrderLittle,
                                  error) == 0)
      return false;

    if (!WriteRegister(context, *reg_info_Rt, data_Rt))
      return false;
  } break;
  default:
    return false;
  }

  if (wback) {
    if (postindex)
      address += offset;

    if (n == 31)
      context.type = eContextAdjustStackPointer;
    else
      context.type = eContextAdjustBaseRegister;
    context.SetImmediateSigned(offset);

    if (!WriteRegisterUnsigned(context, *reg_info_base, address))
      return false;
  }
  return true;
}

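// Emulate an unconditional immediate branch. If the opcode is a BL (bit 31
// set) the return address (PC + 4) is written to LR before the PC-relative
// branch is taken.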
bool EmulateInstructionARM64::EmulateB(const uint32_t opcode) {
#if 0
    // ARM64 pseudo code...
    if branch_type == BranchType_CALL then X[30] = PC[] + 4;
    BranchTo(PC[] + offset, branch_type);
#endif

  bool success = false;

  EmulateInstruction::Context context;
  context.type = EmulateInstruction::eContextRelativeBranchImmediate;
  const uint64_t pc = ReadRegisterUnsigned(eRegisterKindGeneric,
                                           LLDB_REGNUM_GENERIC_PC, 0, &success);
  if (!success)
    return false;

  int64_t offset = llvm::SignExtend64<28>(Bits32(opcode, 25, 0) << 2);
  BranchType branch_type = Bit32(opcode, 31) ? BranchType_CALL : BranchType_JMP;
  addr_t target = pc + offset;
  context.SetImmediateSigned(offset);

  switch (branch_type) {
  case BranchType_CALL: {
    addr_t x30 = pc + 4;
    if (!WriteRegisterUnsigned(context, eRegisterKindLLDB, gpr_lr_arm64, x30))
      return false;
  } break;
  case BranchType_JMP:
    break;
  default:
    return false;
  }

  return BranchTo(context, 64, target);
}

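// Emulate B.<cond>: branch PC-relative only when ConditionHolds() is satisfied
// for the condition field in bits 3:0.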
bool EmulateInstructionARM64::EmulateBcond(const uint32_t opcode) {
#if 0
    // ARM64 pseudo code...
    bits(64) offset = SignExtend(imm19:'00', 64);
    bits(4) condition = cond;
    if ConditionHolds(condition) then
        BranchTo(PC[] + offset, BranchType_JMP);
#endif

  if (ConditionHolds(Bits32(opcode, 3, 0))) {
    bool success = false;

    const uint64_t pc = ReadRegisterUnsigned(
        eRegisterKindGeneric, LLDB_REGNUM_GENERIC_PC, 0, &success);
    if (!success)
      return false;

    int64_t offset = llvm::SignExtend64<21>(Bits32(opcode, 23, 5) << 2);
    addr_t target = pc + offset;

    EmulateInstruction::Context context;
    context.type = EmulateInstruction::eContextRelativeBranchImmediate;
    context.SetImmediateSigned(offset);
    if (!BranchTo(context, 64, target))
      return false;
  }
  return true;
}

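// Emulate CBZ/CBNZ: branch when the tested register is zero (CBZ) or non-zero
// (CBNZ), or unconditionally when conditions are being ignored.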
bool EmulateInstructionARM64::EmulateCBZ(const uint32_t opcode) {
#if 0
    integer t = UInt(Rt);
    integer datasize = if sf == '1' then 64 else 32;
    boolean iszero = (op == '0');
    bits(64) offset = SignExtend(imm19:'00', 64);

    bits(datasize) operand1 = X[t];
    if IsZero(operand1) == iszero then
        BranchTo(PC[] + offset, BranchType_JMP);
#endif

  bool success = false;

  uint32_t t = Bits32(opcode, 4, 0);
  bool is_zero = Bit32(opcode, 24) == 0;
  int32_t offset = llvm::SignExtend64<21>(Bits32(opcode, 23, 5) << 2);

  const uint64_t operand =
      ReadRegisterUnsigned(eRegisterKindLLDB, gpr_x0_arm64 + t, 0, &success);
  if (!success)
    return false;

  if (m_ignore_conditions || ((operand == 0) == is_zero)) {
    const uint64_t pc = ReadRegisterUnsigned(
        eRegisterKindGeneric, LLDB_REGNUM_GENERIC_PC, 0, &success);
    if (!success)
      return false;

    EmulateInstruction::Context context;
    context.type = EmulateInstruction::eContextRelativeBranchImmediate;
    context.SetImmediateSigned(offset);
    if (!BranchTo(context, 64, pc + offset))
      return false;
  }
  return true;
}

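// Emulate TBZ/TBNZ: test the bit selected by b5:b40 in the source register and
// branch when it matches the op bit, or unconditionally when conditions are
// being ignored.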
bool EmulateInstructionARM64::EmulateTBZ(const uint32_t opcode) {
#if 0
    integer t = UInt(Rt);
    integer datasize = if b5 == '1' then 64 else 32;
    integer bit_pos = UInt(b5:b40);
    bit bit_val = op;
    bits(64) offset = SignExtend(imm14:'00', 64);
#endif

  bool success = false;

  uint32_t t = Bits32(opcode, 4, 0);
  uint32_t bit_pos = (Bit32(opcode, 31) << 5) | (Bits32(opcode, 23, 19));
  uint32_t bit_val = Bit32(opcode, 24);
  int64_t offset = llvm::SignExtend64<16>(Bits32(opcode, 18, 5) << 2);

  const uint64_t operand =
      ReadRegisterUnsigned(eRegisterKindLLDB, gpr_x0_arm64 + t, 0, &success);
  if (!success)
    return false;

  if (m_ignore_conditions || Bit64(operand, bit_pos) == bit_val) {
    const uint64_t pc = ReadRegisterUnsigned(
        eRegisterKindGeneric, LLDB_REGNUM_GENERIC_PC, 0, &success);
    if (!success)
      return false;

    EmulateInstruction::Context context;
    context.type = EmulateInstruction::eContextRelativeBranchImmediate;
    context.SetImmediateSigned(offset);
    if (!BranchTo(context, 64, pc + offset))
      return false;
  }
  return true;
}