//===-- DNBArchImplARM64.cpp ------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Created by Greg Clayton on 6/25/07.
//
//===----------------------------------------------------------------------===//

#if defined(__arm__) || defined(__arm64__) || defined(__aarch64__)

#include "MacOSX/arm64/DNBArchImplARM64.h"

#if defined(ARM_THREAD_STATE64_COUNT)

#include "DNB.h"
#include "DNBBreakpoint.h"
#include "DNBLog.h"
#include "DNBRegisterInfo.h"
#include "MacOSX/MachProcess.h"
#include "MacOSX/MachThread.h"

#include <cinttypes>
#include <sys/sysctl.h>

#if __has_feature(ptrauth_calls)
#include <ptrauth.h>
#endif

// Break only in privileged or user mode
// (PAC bits in the DBGWCRn_EL1 watchpoint control register)
#define S_USER ((uint32_t)(2u << 1))

#define BCR_ENABLE ((uint32_t)(1u))
#define WCR_ENABLE ((uint32_t)(1u))

// Watchpoint load/store
// (LSC bits in the DBGWCRn_EL1 watchpoint control register)
#define WCR_LOAD ((uint32_t)(1u << 3))
#define WCR_STORE ((uint32_t)(1u << 4))

// Single instruction step
// (SS bit in the MDSCR_EL1 register)
#define SS_ENABLE ((uint32_t)(1u))
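
// Illustrative example (not from the original source): for a BAS-style
// watchpoint that should trap only on writes to bytes 4..7 of an aligned
// doubleword, the control register value would be composed as
//   (0b11110000 << 5) | S_USER | WCR_STORE | WCR_ENABLE == 0x1e15
// i.e. BAS in bits [12:5], LSC in bits [4:3], PAC in bits [2:1], E in bit [0].
// SetBASWatchpoint() below is where this composition actually happens.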

static const uint8_t g_arm64_breakpoint_opcode[] = {
    0x00, 0x00, 0x20, 0xD4}; // "brk #0", 0xd4200000 in little-endian order

// If we need to set one logical watchpoint by using
// two hardware watchpoint registers, the watchpoint
// will be split into a "high" and "low" watchpoint.
// Record both of them in the LoHi array.

// It's safe to initialize to all 0's since
// hi > lo and therefore LoHi[i] cannot be 0.
static uint32_t LoHi[16] = {0};
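
// Illustrative example (not part of the original source): if a single
// logical watchpoint had to be implemented with hardware slots 2 and 3,
// then LoHi[2] == 3 and slot 2 is the index reported back to lldb;
// ReenableHardwareWatchpoint() and DisableHardwareWatchpoint() consult
// LoHi so that both halves are handled together.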

void DNBArchMachARM64::Initialize() {
  DNBArchPluginInfo arch_plugin_info = {
      CPU_TYPE_ARM64, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info);

  DNBArchPluginInfo arch_plugin_info_32 = {
      CPU_TYPE_ARM64_32, DNBArchMachARM64::Create,
      DNBArchMachARM64::GetRegisterSetInfo,
      DNBArchMachARM64::SoftwareBreakpointOpcode};

  // Register this arch plug-in with the main protocol class
  DNBArchProtocol::RegisterArchPlugin(arch_plugin_info_32);
}

DNBArchProtocol *DNBArchMachARM64::Create(MachThread *thread) {
  DNBArchMachARM64 *obj = new DNBArchMachARM64(thread);

  return obj;
}

const uint8_t *
DNBArchMachARM64::SoftwareBreakpointOpcode(nub_size_t byte_size) {
  return g_arm64_breakpoint_opcode;
}

uint32_t DNBArchMachARM64::GetCPUType() { return CPU_TYPE_ARM64; }

static uint64_t clear_pac_bits(uint64_t value) {
  uint32_t addressing_bits = 0;
  if (!DNBGetAddressingBits(addressing_bits))
    return value;

  // On arm64_32, no ptrauth bits to clear
#if !defined(__LP64__)
  return value;
#endif

  uint64_t mask = ((1ULL << addressing_bits) - 1);

  // Normally PAC bit clearing needs to check b55 and either set or clear
  // the non-addressing bits accordingly. But the register values we get
  // from thread_get_state on an arm64e process don't appear to follow
  // this convention, at least when there has been a PAC auth failure in
  // the inferior.
  // Userland processes are always in low memory, so we hardcode the
  // b55 == 0 PAC stripping behavior here.

  return value & mask; // high bits cleared to 0
}
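
// Illustrative example (hypothetical values, not from the source): if
// DNBGetAddressingBits() reports 47 addressing bits, mask is
// 0x00007fffffffffff, so a PAC-signed pc such as 0xe46b000100a04568 is
// stripped to 0x0000000100a04568.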

uint64_t DNBArchMachARM64::GetPC(uint64_t failValue) {
  // Get program counter
  if (GetGPRState(false) == KERN_SUCCESS)
#if __has_feature(ptrauth_calls) && defined(__LP64__)
    return clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
    return m_state.context.gpr.__pc;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::SetPC(uint64_t value) {
  // Get program counter
  kern_return_t err = GetGPRState(false);
  if (err == KERN_SUCCESS) {
#if defined(__LP64__)
#if __has_feature(ptrauth_calls)
    // The incoming value could be garbage. Strip it to avoid
    // trapping when it gets resigned in the thread state.
    value = (uint64_t) ptrauth_strip((void*) value, ptrauth_key_function_pointer);
    value = (uint64_t) ptrauth_sign_unauthenticated((void*) value, ptrauth_key_function_pointer, 0);
#endif
    arm_thread_state64_set_pc_fptr (m_state.context.gpr, (void*) value);
#else
    m_state.context.gpr.__pc = value;
#endif
    err = SetGPRState();
  }
  return err == KERN_SUCCESS;
}

uint64_t DNBArchMachARM64::GetSP(uint64_t failValue) {
  // Get stack pointer
  if (GetGPRState(false) == KERN_SUCCESS)
#if __has_feature(ptrauth_calls) && defined(__LP64__)
    return clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
#else
    return m_state.context.gpr.__sp;
#endif
  return failValue;
}

kern_return_t DNBArchMachARM64::GetGPRState(bool force) {
  int set = e_regSetGPR;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetGPRCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_THREAD_STATE64,
                         (thread_state_t)&m_state.context.gpr, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
    uint64_t *x = &m_state.context.gpr.__x[0];
    DNBLogThreaded("thread_get_state signed regs "
                   "\n fp=%16.16llx"
                   "\n lr=%16.16llx"
                   "\n sp=%16.16llx"
                   "\n pc=%16.16llx",
#if __has_feature(ptrauth_calls) && defined(__LP64__)
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp),
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_lr),
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp),
                   reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc)
#else
                   m_state.context.gpr.__fp,
                   m_state.context.gpr.__lr,
                   m_state.context.gpr.__sp,
                   m_state.context.gpr.__pc
#endif
    );

#if __has_feature(ptrauth_calls) && defined(__LP64__)
    uint64_t log_fp = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp));
    uint64_t log_lr = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_lr));
    uint64_t log_sp = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
    uint64_t log_pc = clear_pac_bits(
        reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
    uint64_t log_fp = m_state.context.gpr.__fp;
    uint64_t log_lr = m_state.context.gpr.__lr;
    uint64_t log_sp = m_state.context.gpr.__sp;
    uint64_t log_pc = m_state.context.gpr.__pc;
#endif
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &gpr, %u) => 0x%8.8x (count = %u) regs"
        "\n x0=%16.16llx"
        "\n x1=%16.16llx"
        "\n x2=%16.16llx"
        "\n x3=%16.16llx"
        "\n x4=%16.16llx"
        "\n x5=%16.16llx"
        "\n x6=%16.16llx"
        "\n x7=%16.16llx"
        "\n x8=%16.16llx"
        "\n x9=%16.16llx"
        "\n x10=%16.16llx"
        "\n x11=%16.16llx"
        "\n x12=%16.16llx"
        "\n x13=%16.16llx"
        "\n x14=%16.16llx"
        "\n x15=%16.16llx"
        "\n x16=%16.16llx"
        "\n x17=%16.16llx"
        "\n x18=%16.16llx"
        "\n x19=%16.16llx"
        "\n x20=%16.16llx"
        "\n x21=%16.16llx"
        "\n x22=%16.16llx"
        "\n x23=%16.16llx"
        "\n x24=%16.16llx"
        "\n x25=%16.16llx"
        "\n x26=%16.16llx"
        "\n x27=%16.16llx"
        "\n x28=%16.16llx"
        "\n fp=%16.16llx"
        "\n lr=%16.16llx"
        "\n sp=%16.16llx"
        "\n pc=%16.16llx"
        "\n cpsr=%8.8x",
        m_thread->MachPortNumber(), e_regSetGPR, e_regSetGPRCount, kret, count,
        x[0], x[1], x[2], x[3], x[4], x[5], x[6], x[7], x[8], x[9], x[10], x[11],
        x[12], x[13], x[14], x[15], x[16], x[17], x[18], x[19], x[20], x[21],
        x[22], x[23], x[24], x[25], x[26], x[27], x[28],
        log_fp, log_lr, log_sp, log_pc, m_state.context.gpr.__cpsr);
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetVFPState(bool force) {
  int set = e_regSetVFP;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetVFPCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_NEON_STATE64,
                         (thread_state_t)&m_state.context.vfp, &count);
  if (DNBLogEnabledForAny(LOG_THREAD)) {
#if defined(__arm64__) || defined(__aarch64__)
    DNBLogThreaded(
        "thread_get_state(0x%4.4x, %u, &vfp, %u) => 0x%8.8x (count = %u) regs"
        "\n q0 = 0x%16.16llx%16.16llx"
        "\n q1 = 0x%16.16llx%16.16llx"
        "\n q2 = 0x%16.16llx%16.16llx"
        "\n q3 = 0x%16.16llx%16.16llx"
        "\n q4 = 0x%16.16llx%16.16llx"
        "\n q5 = 0x%16.16llx%16.16llx"
        "\n q6 = 0x%16.16llx%16.16llx"
        "\n q7 = 0x%16.16llx%16.16llx"
        "\n q8 = 0x%16.16llx%16.16llx"
        "\n q9 = 0x%16.16llx%16.16llx"
        "\n q10 = 0x%16.16llx%16.16llx"
        "\n q11 = 0x%16.16llx%16.16llx"
        "\n q12 = 0x%16.16llx%16.16llx"
        "\n q13 = 0x%16.16llx%16.16llx"
        "\n q14 = 0x%16.16llx%16.16llx"
        "\n q15 = 0x%16.16llx%16.16llx"
        "\n q16 = 0x%16.16llx%16.16llx"
        "\n q17 = 0x%16.16llx%16.16llx"
        "\n q18 = 0x%16.16llx%16.16llx"
        "\n q19 = 0x%16.16llx%16.16llx"
        "\n q20 = 0x%16.16llx%16.16llx"
        "\n q21 = 0x%16.16llx%16.16llx"
        "\n q22 = 0x%16.16llx%16.16llx"
        "\n q23 = 0x%16.16llx%16.16llx"
        "\n q24 = 0x%16.16llx%16.16llx"
        "\n q25 = 0x%16.16llx%16.16llx"
        "\n q26 = 0x%16.16llx%16.16llx"
        "\n q27 = 0x%16.16llx%16.16llx"
        "\n q28 = 0x%16.16llx%16.16llx"
        "\n q29 = 0x%16.16llx%16.16llx"
        "\n q30 = 0x%16.16llx%16.16llx"
        "\n q31 = 0x%16.16llx%16.16llx"
        "\n fpsr = 0x%8.8x"
        "\n fpcr = 0x%8.8x\n\n",
        m_thread->MachPortNumber(), e_regSetVFP, e_regSetVFPCount, kret, count,
        ((uint64_t *)&m_state.context.vfp.__v[0])[0],
        ((uint64_t *)&m_state.context.vfp.__v[0])[1],
        ((uint64_t *)&m_state.context.vfp.__v[1])[0],
        ((uint64_t *)&m_state.context.vfp.__v[1])[1],
        ((uint64_t *)&m_state.context.vfp.__v[2])[0],
        ((uint64_t *)&m_state.context.vfp.__v[2])[1],
        ((uint64_t *)&m_state.context.vfp.__v[3])[0],
        ((uint64_t *)&m_state.context.vfp.__v[3])[1],
        ((uint64_t *)&m_state.context.vfp.__v[4])[0],
        ((uint64_t *)&m_state.context.vfp.__v[4])[1],
        ((uint64_t *)&m_state.context.vfp.__v[5])[0],
        ((uint64_t *)&m_state.context.vfp.__v[5])[1],
        ((uint64_t *)&m_state.context.vfp.__v[6])[0],
        ((uint64_t *)&m_state.context.vfp.__v[6])[1],
        ((uint64_t *)&m_state.context.vfp.__v[7])[0],
        ((uint64_t *)&m_state.context.vfp.__v[7])[1],
        ((uint64_t *)&m_state.context.vfp.__v[8])[0],
        ((uint64_t *)&m_state.context.vfp.__v[8])[1],
        ((uint64_t *)&m_state.context.vfp.__v[9])[0],
        ((uint64_t *)&m_state.context.vfp.__v[9])[1],
        ((uint64_t *)&m_state.context.vfp.__v[10])[0],
        ((uint64_t *)&m_state.context.vfp.__v[10])[1],
        ((uint64_t *)&m_state.context.vfp.__v[11])[0],
        ((uint64_t *)&m_state.context.vfp.__v[11])[1],
        ((uint64_t *)&m_state.context.vfp.__v[12])[0],
        ((uint64_t *)&m_state.context.vfp.__v[12])[1],
        ((uint64_t *)&m_state.context.vfp.__v[13])[0],
        ((uint64_t *)&m_state.context.vfp.__v[13])[1],
        ((uint64_t *)&m_state.context.vfp.__v[14])[0],
        ((uint64_t *)&m_state.context.vfp.__v[14])[1],
        ((uint64_t *)&m_state.context.vfp.__v[15])[0],
        ((uint64_t *)&m_state.context.vfp.__v[15])[1],
        ((uint64_t *)&m_state.context.vfp.__v[16])[0],
        ((uint64_t *)&m_state.context.vfp.__v[16])[1],
        ((uint64_t *)&m_state.context.vfp.__v[17])[0],
        ((uint64_t *)&m_state.context.vfp.__v[17])[1],
        ((uint64_t *)&m_state.context.vfp.__v[18])[0],
        ((uint64_t *)&m_state.context.vfp.__v[18])[1],
        ((uint64_t *)&m_state.context.vfp.__v[19])[0],
        ((uint64_t *)&m_state.context.vfp.__v[19])[1],
        ((uint64_t *)&m_state.context.vfp.__v[20])[0],
        ((uint64_t *)&m_state.context.vfp.__v[20])[1],
        ((uint64_t *)&m_state.context.vfp.__v[21])[0],
        ((uint64_t *)&m_state.context.vfp.__v[21])[1],
        ((uint64_t *)&m_state.context.vfp.__v[22])[0],
        ((uint64_t *)&m_state.context.vfp.__v[22])[1],
        ((uint64_t *)&m_state.context.vfp.__v[23])[0],
        ((uint64_t *)&m_state.context.vfp.__v[23])[1],
        ((uint64_t *)&m_state.context.vfp.__v[24])[0],
        ((uint64_t *)&m_state.context.vfp.__v[24])[1],
        ((uint64_t *)&m_state.context.vfp.__v[25])[0],
        ((uint64_t *)&m_state.context.vfp.__v[25])[1],
        ((uint64_t *)&m_state.context.vfp.__v[26])[0],
        ((uint64_t *)&m_state.context.vfp.__v[26])[1],
        ((uint64_t *)&m_state.context.vfp.__v[27])[0],
        ((uint64_t *)&m_state.context.vfp.__v[27])[1],
        ((uint64_t *)&m_state.context.vfp.__v[28])[0],
        ((uint64_t *)&m_state.context.vfp.__v[28])[1],
        ((uint64_t *)&m_state.context.vfp.__v[29])[0],
        ((uint64_t *)&m_state.context.vfp.__v[29])[1],
        ((uint64_t *)&m_state.context.vfp.__v[30])[0],
        ((uint64_t *)&m_state.context.vfp.__v[30])[1],
        ((uint64_t *)&m_state.context.vfp.__v[31])[0],
        ((uint64_t *)&m_state.context.vfp.__v[31])[1],
        m_state.context.vfp.__fpsr, m_state.context.vfp.__fpcr);
#endif
  }
  m_state.SetError(set, Read, kret);
  return kret;
}

kern_return_t DNBArchMachARM64::GetEXCState(bool force) {
  int set = e_regSetEXC;
  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetEXCCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
                         (thread_state_t)&m_state.context.exc, &count);
  m_state.SetError(set, Read, kret);
  return kret;
}

#if 0
static void DumpDBGState(const arm_debug_state_t &dbg) {
  uint32_t i = 0;
  for (i = 0; i < 16; i++)
    DNBLogThreadedIf(LOG_STEP, "BVR%-2u/BCR%-2u = { 0x%8.8x, 0x%8.8x } "
                               "WVR%-2u/WCR%-2u = { 0x%8.8x, 0x%8.8x }",
                     i, i, dbg.__bvr[i], dbg.__bcr[i], i, i, dbg.__wvr[i],
                     dbg.__wcr[i]);
}
#endif

kern_return_t DNBArchMachARM64::GetDBGState(bool force) {
  int set = e_regSetDBG;

  // Check if we have valid cached registers
  if (!force && m_state.GetError(set, Read) == KERN_SUCCESS)
    return KERN_SUCCESS;

  // Read the registers from our thread
  mach_msg_type_number_t count = e_regSetDBGCount;
  kern_return_t kret =
      ::thread_get_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, &count);
  m_state.SetError(set, Read, kret);

  return kret;
}

kern_return_t DNBArchMachARM64::SetGPRState() {
  int set = e_regSetGPR;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_THREAD_STATE64,
      (thread_state_t)&m_state.context.gpr, e_regSetGPRCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetVFPState() {
  int set = e_regSetVFP;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_NEON_STATE64,
      (thread_state_t)&m_state.context.vfp, e_regSetVFPCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetEXCState() {
  int set = e_regSetEXC;
  kern_return_t kret = ::thread_set_state(
      m_thread->MachPortNumber(), ARM_EXCEPTION_STATE64,
      (thread_state_t)&m_state.context.exc, e_regSetEXCCount);
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently
  return kret; // Return the error code
}

kern_return_t DNBArchMachARM64::SetDBGState(bool also_set_on_task) {
  int set = e_regSetDBG;
  kern_return_t kret =
      ::thread_set_state(m_thread->MachPortNumber(), ARM_DEBUG_STATE64,
                         (thread_state_t)&m_state.dbg, e_regSetDBGCount);
  if (also_set_on_task) {
    kern_return_t task_kret = task_set_state(
        m_thread->Process()->Task().TaskPort(), ARM_DEBUG_STATE64,
        (thread_state_t)&m_state.dbg, e_regSetDBGCount);
    if (task_kret != KERN_SUCCESS)
      DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::SetDBGState failed "
                                        "to set debug control register state: "
                                        "0x%8.8x.",
                       task_kret);
  }
  m_state.SetError(set, Write,
                   kret); // Set the current write error for this register set
  m_state.InvalidateRegisterSetState(set); // Invalidate the current register
                                           // state in case registers are read
                                           // back differently

  return kret; // Return the error code
}

void DNBArchMachARM64::ThreadWillResume() {
  // Do we need to step this thread? If so, let the mach thread tell us so.
  if (m_thread->IsStepping()) {
    EnableHardwareSingleStep(true);
  }

  // Disable the triggered watchpoint temporarily before we resume.
  // Plus, we try to enable hardware single step to execute past the
  // instruction which triggered our watchpoint.
  if (m_watchpoint_did_occur) {
    if (m_watchpoint_hw_index >= 0) {
      kern_return_t kret = GetDBGState(false);
      if (kret == KERN_SUCCESS &&
          !IsWatchpointEnabled(m_state.dbg, m_watchpoint_hw_index)) {
        // The watchpoint might have been disabled by the user. We don't need
        // to do anything at all to enable hardware single stepping.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        return;
      }

      DisableHardwareWatchpoint(m_watchpoint_hw_index, false);
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::ThreadWillResume() "
                       "DisableHardwareWatchpoint(%d) called",
                       m_watchpoint_hw_index);

      // Enable hardware single step to move past the watchpoint-triggering
      // instruction.
      m_watchpoint_resume_single_step_enabled =
          (EnableHardwareSingleStep(true) == KERN_SUCCESS);

      // If we are not able to enable single step to move past the
      // watchpoint-triggering instruction, at least we should reset the two
      // watchpoint member variables so that the next time around this
      // callback function is invoked, the enclosing logical branch is
      // skipped.
      if (!m_watchpoint_resume_single_step_enabled) {
        // Reset the two watchpoint member variables.
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
        DNBLogThreadedIf(LOG_WATCHPOINTS, "DNBArchMachARM64::ThreadWillResume()"
                                          " failed to enable single step");
      } else
        DNBLogThreadedIf(LOG_WATCHPOINTS,
                         "DNBArchMachARM64::ThreadWillResume() "
                         "succeeded in enabling single step");
    }
  }
}

bool DNBArchMachARM64::NotifyException(MachException::Data &exc) {

  switch (exc.exc_type) {
  default:
    break;
  case EXC_BREAKPOINT:
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_DA_DEBUG) {
      // The data break address is passed as exc_data[1].
      nub_addr_t addr = exc.exc_data[1];
      // Find the hardware index with the side effect of possibly massaging the
      // addr to return the starting address as seen from the debugger side.
      uint32_t hw_index = GetHardwareWatchpointHit(addr);

      // One logical watchpoint was split into two watchpoint locations because
      // it was too big. If the watchpoint exception is indicating the 2nd half
      // of the two-parter, find the address of the 1st half and report that --
      // that's what lldb is going to expect to see.
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::NotifyException "
                       "watchpoint %d was hit on address "
                       "0x%llx",
                       hw_index, (uint64_t)addr);
      const uint32_t num_watchpoints = NumSupportedHardwareWatchpoints();
      for (uint32_t i = 0; i < num_watchpoints; i++) {
        if (LoHi[i] != 0 && LoHi[i] == hw_index && LoHi[i] != i &&
            GetWatchpointAddressByIndex(i) != INVALID_NUB_ADDRESS) {
          addr = GetWatchpointAddressByIndex(i);
          DNBLogThreadedIf(LOG_WATCHPOINTS,
                           "DNBArchMachARM64::NotifyException "
                           "It is a linked watchpoint; "
                           "rewritten to index %d addr 0x%llx",
                           LoHi[i], (uint64_t)addr);
        }
      }

      if (hw_index != INVALID_NUB_HW_INDEX) {
        m_watchpoint_did_occur = true;
        m_watchpoint_hw_index = hw_index;
        exc.exc_data[1] = addr;
        // Piggyback the hw_index in the exc.data.
        exc.exc_data.push_back(hw_index);
      }

      return true;
    }
    // detect a __builtin_debugtrap instruction pattern ("brk #0xf000")
    // and advance the $pc past it, so that the user can continue execution.
    // Generally speaking, this knowledge should be centralized in lldb,
    // recognizing the builtin_trap instruction and knowing how to advance
    // the pc past it, so that continue etc work.
    if (exc.exc_data.size() == 2 && exc.exc_data[0] == EXC_ARM_BREAKPOINT) {
      nub_addr_t pc = GetPC(INVALID_NUB_ADDRESS);
      if (pc != INVALID_NUB_ADDRESS && pc > 0) {
        DNBBreakpoint *bp =
            m_thread->Process()->Breakpoints().FindByAddress(pc);
        if (bp == nullptr) {
          uint8_t insnbuf[4];
          if (m_thread->Process()->ReadMemory(pc, 4, insnbuf) == 4) {
            uint8_t builtin_debugtrap_insn[4] = {0x00, 0x00, 0x3e,
                                                 0xd4}; // brk #0xf000
            if (memcmp(insnbuf, builtin_debugtrap_insn, 4) == 0) {
              SetPC(pc + 4);
            }
          }
        }
      }
    }
    break;
  }
  return false;
}

bool DNBArchMachARM64::ThreadDidStop() {
  bool success = true;

  m_state.InvalidateAllRegisterStates();

  if (m_watchpoint_resume_single_step_enabled) {
    // Great! We now disable the hardware single step as well as re-enable the
    // hardware watchpoint.
    // See also ThreadWillResume().
    if (EnableHardwareSingleStep(false) == KERN_SUCCESS) {
      if (m_watchpoint_did_occur && m_watchpoint_hw_index >= 0) {
        ReenableHardwareWatchpoint(m_watchpoint_hw_index);
        m_watchpoint_resume_single_step_enabled = false;
        m_watchpoint_did_occur = false;
        m_watchpoint_hw_index = -1;
      } else {
        DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                    "is true but (m_watchpoint_did_occur && "
                    "m_watchpoint_hw_index >= 0) does not hold!");
      }
    } else {
      DNBLogError("internal error detected: m_watchpoint_resume_step_enabled "
                  "is true but unable to disable single step!");
    }
  }

  // Are we stepping a single instruction?
  if (GetGPRState(true) == KERN_SUCCESS) {
    // We are single stepping, was this the primary thread?
    if (m_thread->IsStepping()) {
      // This was the primary thread, we need to clear the trace
      // bit if so.
      success = EnableHardwareSingleStep(false) == KERN_SUCCESS;
    } else {
      // The MachThread will automatically restore the suspend count
      // in ThreadDidStop(), so we don't need to do anything here if
      // we weren't the primary thread the last time
    }
  }
  return success;
}

// Enable or disable the single step bit (MDSCR_EL1.SS) for this thread.
kern_return_t DNBArchMachARM64::EnableHardwareSingleStep(bool enable) {
  DNBError err;
  DNBLogThreadedIf(LOG_STEP, "%s( enable = %d )", __FUNCTION__, enable);

  err = GetGPRState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the GPR registers", __FUNCTION__);
    return err.Status();
  }

  err = GetDBGState(false);

  if (err.Fail()) {
    err.LogThreaded("%s: failed to read the DBG registers", __FUNCTION__);
    return err.Status();
  }

#if __has_feature(ptrauth_calls) && defined(__LP64__)
  uint64_t pc = clear_pac_bits(
      reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
#else
  uint64_t pc = m_state.context.gpr.__pc;
#endif

  if (enable) {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Setting MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 |= SS_ENABLE;
  } else {
    DNBLogThreadedIf(LOG_STEP,
                     "%s: Clearing MDSCR_EL1 Single Step bit at pc 0x%llx",
                     __FUNCTION__, pc);
    m_state.dbg.__mdscr_el1 &= ~(SS_ENABLE);
  }

  return SetDBGState(false);
}

// return 1 if bit "BIT" is set in "value"
static inline uint32_t bit(uint32_t value, uint32_t bit) {
  return (value >> bit) & 1u;
}

// return the bitfield "value[msbit:lsbit]".
static inline uint64_t bits(uint64_t value, uint32_t msbit, uint32_t lsbit) {
  assert(msbit >= lsbit);
  uint64_t shift_left = sizeof(value) * 8 - 1 - msbit;
  value <<=
      shift_left; // shift anything above the msbit off of the unsigned edge
  value >>= shift_left + lsbit; // shift it back again down to the lsbit
                                // (including undoing any shift from above)
  return value; // return our result
}
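
// Worked examples (illustrative, not from the original source):
//   bit(0x5, 2) == 1                  (0b101, bit 2 is set)
//   bits(0xD4200000, 31, 28) == 0xD
//   bits(0xD4200000, 7, 0) == 0x00
// i.e. bits() extracts an inclusive bitfield by shifting the msbit up to the
// top of the 64-bit value and then shifting back down to the lsbit.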

uint32_t DNBArchMachARM64::NumSupportedHardwareWatchpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many watchpoints are supported dynamically...
  static uint32_t g_num_supported_hw_watchpoints = UINT_MAX;
  if (g_num_supported_hw_watchpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW watchpoints
    g_num_supported_hw_watchpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.watchpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_watchpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.watchpoint=%u", n);
    } else {
// For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
// EL0 so it can't access that reg. The kernel should have filled in the
// sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_watchpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw watchpoints via asm(): %d",
                       g_num_supported_hw_watchpoints);
#endif
    }
  }
  return g_num_supported_hw_watchpoints;
}

uint32_t DNBArchMachARM64::NumSupportedHardwareBreakpoints() {
  // Set the init value to something that will let us know that we need to
  // autodetect how many breakpoints are supported dynamically...
  static uint32_t g_num_supported_hw_breakpoints = UINT_MAX;
  if (g_num_supported_hw_breakpoints == UINT_MAX) {
    // Set this to zero in case we can't tell if there are any HW breakpoints
    g_num_supported_hw_breakpoints = 0;

    size_t len;
    uint32_t n = 0;
    len = sizeof(n);
    if (::sysctlbyname("hw.optional.breakpoint", &n, &len, NULL, 0) == 0) {
      g_num_supported_hw_breakpoints = n;
      DNBLogThreadedIf(LOG_THREAD, "hw.optional.breakpoint=%u", n);
    } else {
// For AArch64 we would need to look at ID_AA64DFR0_EL1 but debugserver runs in
// EL0 so it can't access that reg. The kernel should have filled in the
// sysctls based on it though.
#if defined(__arm__)
      uint32_t register_DBGDIDR;

      asm("mrc p14, 0, %0, c0, c0, 0" : "=r"(register_DBGDIDR));
      uint32_t numWRPs = bits(register_DBGDIDR, 31, 28);
      // Zero is reserved for the WRP count, so don't increment it if it is zero
      if (numWRPs > 0)
        numWRPs++;
      g_num_supported_hw_breakpoints = numWRPs;
      DNBLogThreadedIf(LOG_THREAD,
                       "Number of supported hw breakpoints via asm(): %d",
                       g_num_supported_hw_breakpoints);
#endif
    }
  }
  return g_num_supported_hw_breakpoints;
}

uint32_t DNBArchMachARM64::EnableHardwareBreakpoint(nub_addr_t addr,
                                                    nub_size_t size,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareBreakpoint(addr = "
                   "0x%8.8llx, size = %zu)",
                   (uint64_t)addr, size);

  const uint32_t num_hw_breakpoints = NumSupportedHardwareBreakpoints();

  nub_addr_t aligned_bp_address = addr;
  uint32_t control_value = 0;

  switch (size) {
  case 2:
    control_value = (0x3 << 5) | 7;
    aligned_bp_address &= ~1;
    break;
  case 4:
    control_value = (0xfu << 5) | 7;
    aligned_bp_address &= ~3;
    break;
  }
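
  // Illustrative example (not from the original source): for a 4-byte
  // breakpoint the control value is (0xfu << 5) | 7 == 0x1e7, i.e.
  // BAS[8:5] = 0b1111 (match all four instruction bytes), PMC[2:1] = 0b11
  // (match in privileged or user mode) and E[0] = 1, with the address
  // word-aligned by clearing its low two bits.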

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret == KERN_SUCCESS) {
    // Check to make sure we have the needed hardware support
    uint32_t i = 0;

    for (i = 0; i < num_hw_breakpoints; ++i) {
      if ((m_state.dbg.__bcr[i] & BCR_ENABLE) == 0)
        break; // We found an available hw breakpoint slot (in i)
    }

    // See if we found an available hw breakpoint slot above
    if (i < num_hw_breakpoints) {
      m_state.dbg.__bvr[i] = aligned_bp_address;
      m_state.dbg.__bcr[i] = control_value;

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::EnableHardwareBreakpoint() "
                       "adding breakpoint on address 0x%llx with control "
                       "register value 0x%x",
                       (uint64_t)m_state.dbg.__bvr[i],
                       (uint32_t)m_state.dbg.__bcr[i]);

      kret = SetDBGState(also_set_on_task);

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "EnableHardwareBreakpoint() "
                       "SetDBGState() => 0x%8.8x.",
                       kret);

      if (kret == KERN_SUCCESS)
        return i;
    } else {
      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchMachARM64::"
                       "EnableHardwareBreakpoint(): All "
                       "hardware resources (%u) are in use.",
                       num_hw_breakpoints);
    }
  }
  return INVALID_NUB_HW_INDEX;
}

// This should be `std::bit_ceil(aligned_size)` but
// that requires C++20.
// Calculates the smallest integral power of two that is not smaller than x.
static uint64_t bit_ceil(uint64_t input) {
  if (input <= 1 || __builtin_popcountll(input) == 1)
    return input;

  return 1ULL << (64 - __builtin_clzll(input));
}
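
// Worked examples (illustrative, not from the original source):
//   bit_ceil(8)  == 8   (already a power of two)
//   bit_ceil(9)  == 16
//   bit_ceil(20) == 32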

std::vector<DNBArchMachARM64::WatchpointSpec>
DNBArchMachARM64::AlignRequestedWatchpoint(nub_addr_t requested_addr,
                                           nub_size_t requested_size) {

  // Can't watch zero bytes
  if (requested_size == 0)
    return {};

  // Smallest size we can watch on AArch64 is 8 bytes
  constexpr nub_size_t min_watchpoint_alignment = 8;
  nub_size_t aligned_size = std::max(requested_size, min_watchpoint_alignment);

  /// Round up \a requested_size to the next power-of-2 size, at least 8
  /// bytes
  /// requested_size == 8 -> aligned_size == 8
  /// requested_size == 9 -> aligned_size == 16
  aligned_size = bit_ceil(aligned_size);

  nub_addr_t aligned_start = requested_addr & ~(aligned_size - 1);
  // Does this power-of-2 memory range, aligned to power-of-2, completely
  // encompass the requested watch region.
  if (aligned_start + aligned_size >= requested_addr + requested_size) {
    WatchpointSpec wp;
    wp.aligned_start = aligned_start;
    wp.requested_start = requested_addr;
    wp.aligned_size = aligned_size;
    wp.requested_size = requested_size;
    return {{wp}};
  }

  // We need to split this into two watchpoints, split on the aligned_size
  // boundary and re-evaluate the alignment of each half.
  //
  // requested_addr 48 requested_size 20 -> aligned_size 32
  // aligned_start 32
  // split_addr 64
  // first_requested_addr 48
  // first_requested_size 16
  // second_requested_addr 64
  // second_requested_size 4
  nub_addr_t split_addr = aligned_start + aligned_size;

  nub_addr_t first_requested_addr = requested_addr;
  nub_size_t first_requested_size = split_addr - requested_addr;
  nub_addr_t second_requested_addr = split_addr;
  nub_size_t second_requested_size = requested_size - first_requested_size;

  std::vector<WatchpointSpec> first_wp =
      AlignRequestedWatchpoint(first_requested_addr, first_requested_size);
  std::vector<WatchpointSpec> second_wp =
      AlignRequestedWatchpoint(second_requested_addr, second_requested_size);
  if (first_wp.size() != 1 || second_wp.size() != 1)
    return {};

  return {{first_wp[0], second_wp[0]}};
}

uint32_t DNBArchMachARM64::EnableHardwareWatchpoint(nub_addr_t addr,
                                                    nub_size_t size, bool read,
                                                    bool write,
                                                    bool also_set_on_task) {
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint(addr = "
                   "0x%8.8llx, size = %zu, read = %u, write = %u)",
                   (uint64_t)addr, size, read, write);

  std::vector<DNBArchMachARM64::WatchpointSpec> wps =
      AlignRequestedWatchpoint(addr, size);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::EnableHardwareWatchpoint() using %zu "
                   "hardware watchpoints",
                   wps.size());

  if (wps.size() == 0)
    return INVALID_NUB_HW_INDEX;

  // We must watch for either read or write
  if (read == false && write == false)
    return INVALID_NUB_HW_INDEX;

  // Only one hardware watchpoint needed
  // to implement the user's request.
  if (wps.size() == 1) {
    if (wps[0].aligned_size <= 8)
      return SetBASWatchpoint(wps[0], read, write, also_set_on_task);
    else
      return SetMASKWatchpoint(wps[0], read, write, also_set_on_task);
  }
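
  // Illustrative examples (not from the original source):
  //  - an 8-byte watch on an 8-byte aligned address fits in one BAS
  //    watchpoint;
  //  - a 32-byte watch on a 32-byte aligned address uses one MASK watchpoint;
  //  - a request that straddles its power-of-2 alignment boundary was split
  //    by AlignRequestedWatchpoint() into two specs and is handled below by
  //    recursing on each half and linking the two slots via LoHi[].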

  // We have multiple WatchpointSpecs

  std::vector<uint32_t> wp_slots_used;
  for (size_t i = 0; i < wps.size(); i++) {
    uint32_t idx =
        EnableHardwareWatchpoint(wps[i].requested_start, wps[i].requested_size,
                                 read, write, also_set_on_task);
    if (idx != INVALID_NUB_HW_INDEX)
      wp_slots_used.push_back(idx);
  }

  // Did we fail to set all of the WatchpointSpecs needed
  // for this user's request?
  if (wps.size() != wp_slots_used.size()) {
    for (int wp_slot : wp_slots_used)
      DisableHardwareWatchpoint(wp_slot, also_set_on_task);
    return INVALID_NUB_HW_INDEX;
  }

  LoHi[wp_slots_used[0]] = wp_slots_used[1];
  return wp_slots_used[0];
}

uint32_t DNBArchMachARM64::SetBASWatchpoint(DNBArchMachARM64::WatchpointSpec wp,
                                            bool read, bool write,
                                            bool also_set_on_task) {
  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  nub_addr_t aligned_dword_addr = wp.aligned_start;
  nub_addr_t watching_offset = wp.requested_start - wp.aligned_start;
  nub_size_t watching_size = wp.requested_size;

  // If user asks to watch 3 bytes at 0x1005,
  // aligned_dword_addr 0x1000
  // watching_offset 5
  // watching_size 3

  // Set the Byte Address Selects bits DBGWCRn_EL1 bits [12:5] based on the
  // above.
  // The bit shift and negation operation will give us 0b11 for 2, 0b1111 for
  // 4, etc, up to 0b11111111 for 8.
  // then we shift those bits left by the offset into this dword that we are
  // interested in.
  // e.g. if we are watching bytes 4,5,6,7 in a dword we want a BAS of
  // 0b11110000.
  uint32_t byte_address_select = ((1 << watching_size) - 1) << watching_offset;

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_HW_INDEX;

  // Check to make sure we have the needed hardware support
  uint32_t i = 0;

  for (i = 0; i < num_hw_watchpoints; ++i) {
    if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
      break; // We found an available hw watchpoint slot
  }
  if (i == num_hw_watchpoints) {
    DNBLogThreadedIf(LOG_WATCHPOINTS,
                     "DNBArchMachARM64::"
                     "SetBASWatchpoint(): All "
                     "hardware resources (%u) are in use.",
                     num_hw_watchpoints);
    return INVALID_NUB_HW_INDEX;
  }

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetBASWatchpoint() "
                   "set hardware register %d to BAS watchpoint "
                   "aligned start address 0x%llx, watch region start "
                   "offset %lld, number of bytes %zu",
                   i, aligned_dword_addr, watching_offset, watching_size);

  // Clear any previous LoHi joined-watchpoint that may have been in use
  LoHi[i] = 0;

  // shift our Byte Address Select bits up to the correct bit range for the
  // DBGWCRn_EL1
  byte_address_select = byte_address_select << 5;

  // Make sure bits 1:0 are clear in our address
  m_state.dbg.__wvr[i] = aligned_dword_addr;   // DVA (Data Virtual Address)
  m_state.dbg.__wcr[i] = byte_address_select | // Which bytes that follow
                                               // the DVA that we will watch
                         S_USER |              // Stop only in user mode
                         (read ? WCR_LOAD : 0) |   // Stop on read access?
                         (write ? WCR_STORE : 0) | // Stop on write access?
                         WCR_ENABLE;               // Enable this watchpoint;

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::SetBASWatchpoint() "
                   "adding watchpoint on address 0x%llx with control "
                   "register value 0x%x",
                   (uint64_t)m_state.dbg.__wvr[i],
                   (uint32_t)m_state.dbg.__wcr[i]);

  kret = SetDBGState(also_set_on_task);
  // DumpDBGState(m_state.dbg);

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetBASWatchpoint() "
                   "SetDBGState() => 0x%8.8x.",
                   kret);

  if (kret == KERN_SUCCESS)
    return i;

  return INVALID_NUB_HW_INDEX;
}

uint32_t
DNBArchMachARM64::SetMASKWatchpoint(DNBArchMachARM64::WatchpointSpec wp,
                                    bool read, bool write,
                                    bool also_set_on_task) {
  const uint32_t num_hw_watchpoints = NumSupportedHardwareWatchpoints();

  // Read the debug state
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_HW_INDEX;

  // Check to make sure we have the needed hardware support
  uint32_t i = 0;

  for (i = 0; i < num_hw_watchpoints; ++i) {
    if ((m_state.dbg.__wcr[i] & WCR_ENABLE) == 0)
      break; // We found an available hw watchpoint slot
  }
  if (i == num_hw_watchpoints) {
    DNBLogThreadedIf(LOG_WATCHPOINTS,
                     "DNBArchMachARM64::"
                     "SetMASKWatchpoint(): All "
                     "hardware resources (%u) are in use.",
                     num_hw_watchpoints);
    return INVALID_NUB_HW_INDEX;
  }

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetMASKWatchpoint() "
                   "set hardware register %d to MASK watchpoint "
                   "aligned start address 0x%llx, aligned size %zu",
                   i, wp.aligned_start, wp.aligned_size);

  // Clear any previous LoHi joined-watchpoint that may have been in use
  LoHi[i] = 0;

  // MASK field is the number of low bits that are masked off
  // when comparing the address with the DBGWVR<n>_EL1 values.
  // If aligned size is 16, that means we ignore the low 4 bits, 0b1111.
  // popcount(16 - 1) gives us the correct value of 4.
  // 2GB is the max watchable region, which is 31 bits (low bits 0x7fffffff
  // masked off) -- a MASK value of 31.
  const uint64_t mask = __builtin_popcountl(wp.aligned_size - 1) << 24;
  // A BAS value of 0b11111111 is required for MASK watchpoints, in addition
  // to the nonzero MASK value.
  const uint64_t not_bas_wp = 0xff << 5;
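
  // Illustrative example (not from the original source): for an aligned size
  // of 16 bytes, __builtin_popcountl(15) == 4, so MASK (bits [28:24]) is
  // 0b00100 and the low 4 bits of the address are ignored when matching.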

  m_state.dbg.__wvr[i] = wp.aligned_start;
  m_state.dbg.__wcr[i] = mask | not_bas_wp | S_USER | // Stop only in user mode
                         (read ? WCR_LOAD : 0) |   // Stop on read access?
                         (write ? WCR_STORE : 0) | // Stop on write access?
                         WCR_ENABLE;               // Enable this watchpoint;

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::SetMASKWatchpoint() "
                   "adding watchpoint on address 0x%llx with control "
                   "register value 0x%llx",
                   (uint64_t)m_state.dbg.__wvr[i],
                   (uint64_t)m_state.dbg.__wcr[i]);

  kret = SetDBGState(also_set_on_task);

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "SetMASKWatchpoint() "
                   "SetDBGState() => 0x%8.8x.",
                   kret);

  if (kret == KERN_SUCCESS)
    return i;

  return INVALID_NUB_HW_INDEX;
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint(uint32_t hw_index) {
  // If this logical watchpoint # is actually implemented using
  // two hardware watchpoint registers, re-enable both of them.

  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return ReenableHardwareWatchpoint_helper(hw_index) &&
           ReenableHardwareWatchpoint_helper(LoHi[hw_index]);
  } else {
    return ReenableHardwareWatchpoint_helper(hw_index);
  }
}

bool DNBArchMachARM64::ReenableHardwareWatchpoint_helper(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_state.dbg.__wvr[hw_index] = m_disabled_watchpoints[hw_index].addr;
  m_state.dbg.__wcr[hw_index] = m_disabled_watchpoints[hw_index].control;

  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "ReenableHardwareWatchpoint_helper( %u ) - WVR%u = "
                   "0x%8.8llx WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(false);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareWatchpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  if (hw_index < NumSupportedHardwareWatchpoints() && LoHi[hw_index]) {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task) &&
           DisableHardwareWatchpoint_helper(LoHi[hw_index], also_set_on_task);
  } else {
    return DisableHardwareWatchpoint_helper(hw_index, also_set_on_task);
  }
}

bool DNBArchMachARM64::DisableHardwareWatchpoint_helper(uint32_t hw_index,
                                                        bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareWatchpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_watchpoints[hw_index].addr = m_state.dbg.__wvr[hw_index];
  m_disabled_watchpoints[hw_index].control = m_state.dbg.__wcr[hw_index];

  m_state.dbg.__wcr[hw_index] &= ~((nub_addr_t)WCR_ENABLE);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "DisableHardwareWatchpoint( %u ) - WVR%u = "
                   "0x%8.8llx WCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__wvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__wcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

bool DNBArchMachARM64::DisableHardwareBreakpoint(uint32_t hw_index,
                                                 bool also_set_on_task) {
  kern_return_t kret = GetDBGState(false);
  if (kret != KERN_SUCCESS)
    return false;

  const uint32_t num_hw_points = NumSupportedHardwareBreakpoints();
  if (hw_index >= num_hw_points)
    return false;

  m_disabled_breakpoints[hw_index].addr = m_state.dbg.__bvr[hw_index];
  m_disabled_breakpoints[hw_index].control = m_state.dbg.__bcr[hw_index];

  m_state.dbg.__bcr[hw_index] = 0;
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::"
                   "DisableHardwareBreakpoint( %u ) - BVR%u = "
                   "0x%8.8llx BCR%u = 0x%8.8llx",
                   hw_index, hw_index, (uint64_t)m_state.dbg.__bvr[hw_index],
                   hw_index, (uint64_t)m_state.dbg.__bcr[hw_index]);

  kret = SetDBGState(also_set_on_task);

  return (kret == KERN_SUCCESS);
}

// This is for checking the Byte Address Select bits in the DBGWCRn_EL1 control
// register.
// Returns -1 if the trailing bit patterns are not one of:
// { 0b???????1, 0b??????10, 0b?????100, 0b????1000, 0b???10000, 0b??100000,
// 0b?1000000, 0b10000000 }.
static inline int32_t LowestBitSet(uint32_t val) {
  for (unsigned i = 0; i < 8; ++i) {
    if (bit(val, i))
      return i;
  }
  return -1;
}
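
// Worked examples (illustrative, not from the original source):
//   LowestBitSet(0b11110000) == 4
//   LowestBitSet(0)          == -1 (no BAS bits set)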

// Iterate through the debug registers; return the index of the first
// watchpoint whose address matches. As a side effect, the starting address
// as understood by the debugger is returned, which could be different from
// 'addr' passed as an in/out argument.
uint32_t DNBArchMachARM64::GetHardwareWatchpointHit(nub_addr_t &addr) {
  // Read the debug state
  kern_return_t kret = GetDBGState(true);
  // DumpDBGState(m_state.dbg);
  DNBLogThreadedIf(
      LOG_WATCHPOINTS,
      "DNBArchMachARM64::GetHardwareWatchpointHit() GetDBGState() => 0x%8.8x.",
      kret);
  DNBLogThreadedIf(LOG_WATCHPOINTS,
                   "DNBArchMachARM64::GetHardwareWatchpointHit() addr = 0x%llx",
                   (uint64_t)addr);

  if (kret == KERN_SUCCESS) {
    DBG &debug_state = m_state.dbg;
    uint32_t i, num = NumSupportedHardwareWatchpoints();
    for (i = 0; i < num; ++i) {
      nub_addr_t wp_addr = GetWatchAddress(debug_state, i);

      DNBLogThreadedIf(LOG_WATCHPOINTS,
                       "DNBArchImplARM64::"
                       "GetHardwareWatchpointHit() slot: %u "
                       "(addr = 0x%llx, WCR = 0x%llx)",
                       i, wp_addr, debug_state.__wcr[i]);

      if (!IsWatchpointEnabled(debug_state, i))
        continue;

      // DBGWCR<n>EL1.BAS are the bits of the doubleword that are watched
      // with a BAS watchpoint.
      uint32_t bas_bits = bits(debug_state.__wcr[i], 12, 5);
      // DBGWCR<n>EL1.MASK is the number of bits that are masked off the
      // virtual address when comparing to DBGWVR<n>_EL1.
      uint32_t mask = bits(debug_state.__wcr[i], 28, 24);

      const bool is_bas_watchpoint = mask == 0;

      DNBLogThreadedIf(
          LOG_WATCHPOINTS,
          "DNBArchImplARM64::"
          "GetHardwareWatchpointHit() slot: %u %s",
          i, is_bas_watchpoint ? "is BAS watchpoint" : "is MASK watchpoint");

      if (is_bas_watchpoint) {
        if (bits(wp_addr, 48, 3) != bits(addr, 48, 3))
          continue;
      } else {
        if (bits(wp_addr, 48, mask) == bits(addr, 48, mask)) {
          DNBLogThreadedIf(LOG_WATCHPOINTS,
                           "DNBArchImplARM64::"
                           "GetHardwareWatchpointHit() slot: %u matched MASK "
                           "ignoring %u low bits",
                           i, mask);
          return i;
        }
      }

      if (is_bas_watchpoint) {
        // Sanity check the bas_bits
        int32_t lsb = LowestBitSet(bas_bits);
        if (lsb < 0)
          continue;

        uint64_t byte_to_match = bits(addr, 2, 0);

        if (bas_bits & (1 << byte_to_match)) {
          addr = wp_addr + lsb;
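          // Illustrative example (hypothetical values, not from the source):
          // if wp_addr is 0x100004568 and bas_bits is 0b11110000 (watching
          // bytes 4..7), a trap at 0x10000456e reports addr 0x10000456c,
          // the start of the watched byte range.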
          DNBLogThreadedIf(LOG_WATCHPOINTS,
                           "DNBArchImplARM64::"
                           "GetHardwareWatchpointHit() slot: %u matched BAS",
                           i);
          return i;
        }
      }
    }
  }
  return INVALID_NUB_HW_INDEX;
}

nub_addr_t DNBArchMachARM64::GetWatchpointAddressByIndex(uint32_t hw_index) {
  kern_return_t kret = GetDBGState(true);
  if (kret != KERN_SUCCESS)
    return INVALID_NUB_ADDRESS;
  const uint32_t num = NumSupportedHardwareWatchpoints();
  if (hw_index >= num)
    return INVALID_NUB_ADDRESS;
  if (IsWatchpointEnabled(m_state.dbg, hw_index))
    return GetWatchAddress(m_state.dbg, hw_index);
  return INVALID_NUB_ADDRESS;
}

bool DNBArchMachARM64::IsWatchpointEnabled(const DBG &debug_state,
                                           uint32_t hw_index) {
  // Watchpoint Control Registers, bitfield definitions
  // ...
  // Bits Value Description
  // [0]  0     Watchpoint disabled
  //      1     Watchpoint enabled.
  return (debug_state.__wcr[hw_index] & 1u);
}

nub_addr_t DNBArchMachARM64::GetWatchAddress(const DBG &debug_state,
                                             uint32_t hw_index) {
  // Watchpoint Value Registers, bitfield definitions
  // Bits    Description
  // [63:2]  Watchpoint address
  // [1:0]   RES0
  return bits(debug_state.__wvr[hw_index], 63, 0);
}

// Register information definitions for 64 bit ARMv8.
enum gpr_regnums {
  gpr_x0 = 0,
  gpr_x1,
  gpr_x2,
  gpr_x3,
  gpr_x4,
  gpr_x5,
  gpr_x6,
  gpr_x7,
  gpr_x8,
  gpr_x9,
  gpr_x10,
  gpr_x11,
  gpr_x12,
  gpr_x13,
  gpr_x14,
  gpr_x15,
  gpr_x16,
  gpr_x17,
  gpr_x18,
  gpr_x19,
  gpr_x20,
  gpr_x21,
  gpr_x22,
  gpr_x23,
  gpr_x24,
  gpr_x25,
  gpr_x26,
  gpr_x27,
  gpr_x28,
  gpr_fp,
  gpr_x29 = gpr_fp,
  gpr_lr,
  gpr_x30 = gpr_lr,
  gpr_sp,
  gpr_x31 = gpr_sp,
  gpr_pc,
  gpr_cpsr,
  gpr_w0,
  gpr_w1,
  gpr_w2,
  gpr_w3,
  gpr_w4,
  gpr_w5,
  gpr_w6,
  gpr_w7,
  gpr_w8,
  gpr_w9,
  gpr_w10,
  gpr_w11,
  gpr_w12,
  gpr_w13,
  gpr_w14,
  gpr_w15,
  gpr_w16,
  gpr_w17,
  gpr_w18,
  gpr_w19,
  gpr_w20,
  gpr_w21,
  gpr_w22,
  gpr_w23,
  gpr_w24,
  gpr_w25,
  gpr_w26,
  gpr_w27,
  gpr_w28

};

enum {
  vfp_v0 = 0,
  vfp_v1,
  vfp_v2,
  vfp_v3,
  vfp_v4,
  vfp_v5,
  vfp_v6,
  vfp_v7,
  vfp_v8,
  vfp_v9,
  vfp_v10,
  vfp_v11,
  vfp_v12,
  vfp_v13,
  vfp_v14,
  vfp_v15,
  vfp_v16,
  vfp_v17,
  vfp_v18,
  vfp_v19,
  vfp_v20,
  vfp_v21,
  vfp_v22,
  vfp_v23,
  vfp_v24,
  vfp_v25,
  vfp_v26,
  vfp_v27,
  vfp_v28,
  vfp_v29,
  vfp_v30,
  vfp_v31,
  vfp_fpsr,
  vfp_fpcr,

  // lower 32 bits of the corresponding vfp_v<n> reg.
  vfp_s0,
  vfp_s1,
  vfp_s2,
  vfp_s3,
  vfp_s4,
  vfp_s5,
  vfp_s6,
  vfp_s7,
  vfp_s8,
  vfp_s9,
  vfp_s10,
  vfp_s11,
  vfp_s12,
  vfp_s13,
  vfp_s14,
  vfp_s15,
  vfp_s16,
  vfp_s17,
  vfp_s18,
  vfp_s19,
  vfp_s20,
  vfp_s21,
  vfp_s22,
  vfp_s23,
  vfp_s24,
  vfp_s25,
  vfp_s26,
  vfp_s27,
  vfp_s28,
  vfp_s29,
  vfp_s30,
  vfp_s31,

  // lower 64 bits of the corresponding vfp_v<n> reg.
  vfp_d0,
  vfp_d1,
  vfp_d2,
  vfp_d3,
  vfp_d4,
  vfp_d5,
  vfp_d6,
  vfp_d7,
  vfp_d8,
  vfp_d9,
  vfp_d10,
  vfp_d11,
  vfp_d12,
  vfp_d13,
  vfp_d14,
  vfp_d15,
  vfp_d16,
  vfp_d17,
  vfp_d18,
  vfp_d19,
  vfp_d20,
  vfp_d21,
  vfp_d22,
  vfp_d23,
  vfp_d24,
  vfp_d25,
  vfp_d26,
  vfp_d27,
  vfp_d28,
  vfp_d29,
  vfp_d30,
  vfp_d31
};

enum { exc_far = 0, exc_esr, exc_exception };

// These numbers come from the "DWARF for the ARM 64-bit Architecture
// (AArch64)" document.

1536enum {
1537 dwarf_x0 = 0,
1538 dwarf_x1,
1539 dwarf_x2,
1540 dwarf_x3,
1541 dwarf_x4,
1542 dwarf_x5,
1543 dwarf_x6,
1544 dwarf_x7,
1545 dwarf_x8,
1546 dwarf_x9,
1547 dwarf_x10,
1548 dwarf_x11,
1549 dwarf_x12,
1550 dwarf_x13,
1551 dwarf_x14,
1552 dwarf_x15,
1553 dwarf_x16,
1554 dwarf_x17,
1555 dwarf_x18,
1556 dwarf_x19,
1557 dwarf_x20,
1558 dwarf_x21,
1559 dwarf_x22,
1560 dwarf_x23,
1561 dwarf_x24,
1562 dwarf_x25,
1563 dwarf_x26,
1564 dwarf_x27,
1565 dwarf_x28,
1566 dwarf_x29,
1567 dwarf_x30,
1568 dwarf_x31,
1569 dwarf_pc = 32,
1570 dwarf_elr_mode = 33,
1571 dwarf_fp = dwarf_x29,
1572 dwarf_lr = dwarf_x30,
1573 dwarf_sp = dwarf_x31,
1574 // 34-63 reserved
1575
1576 // V0-V31 (128 bit vector registers)
1577 dwarf_v0 = 64,
1578 dwarf_v1,
1579 dwarf_v2,
1580 dwarf_v3,
1581 dwarf_v4,
1582 dwarf_v5,
1583 dwarf_v6,
1584 dwarf_v7,
1585 dwarf_v8,
1586 dwarf_v9,
1587 dwarf_v10,
1588 dwarf_v11,
1589 dwarf_v12,
1590 dwarf_v13,
1591 dwarf_v14,
1592 dwarf_v15,
1593 dwarf_v16,
1594 dwarf_v17,
1595 dwarf_v18,
1596 dwarf_v19,
1597 dwarf_v20,
1598 dwarf_v21,
1599 dwarf_v22,
1600 dwarf_v23,
1601 dwarf_v24,
1602 dwarf_v25,
1603 dwarf_v26,
1604 dwarf_v27,
1605 dwarf_v28,
1606 dwarf_v29,
1607 dwarf_v30,
1608 dwarf_v31
1609
1610 // 96-127 reserved
1611};
1612
1613enum {
1614 debugserver_gpr_x0 = 0,
1615 debugserver_gpr_x1,
1616 debugserver_gpr_x2,
1617 debugserver_gpr_x3,
1618 debugserver_gpr_x4,
1619 debugserver_gpr_x5,
1620 debugserver_gpr_x6,
1621 debugserver_gpr_x7,
1622 debugserver_gpr_x8,
1623 debugserver_gpr_x9,
1624 debugserver_gpr_x10,
1625 debugserver_gpr_x11,
1626 debugserver_gpr_x12,
1627 debugserver_gpr_x13,
1628 debugserver_gpr_x14,
1629 debugserver_gpr_x15,
1630 debugserver_gpr_x16,
1631 debugserver_gpr_x17,
1632 debugserver_gpr_x18,
1633 debugserver_gpr_x19,
1634 debugserver_gpr_x20,
1635 debugserver_gpr_x21,
1636 debugserver_gpr_x22,
1637 debugserver_gpr_x23,
1638 debugserver_gpr_x24,
1639 debugserver_gpr_x25,
1640 debugserver_gpr_x26,
1641 debugserver_gpr_x27,
1642 debugserver_gpr_x28,
1643 debugserver_gpr_fp, // x29
1644 debugserver_gpr_lr, // x30
1645 debugserver_gpr_sp, // sp aka xsp
1646 debugserver_gpr_pc,
1647 debugserver_gpr_cpsr,
1648 debugserver_vfp_v0,
1649 debugserver_vfp_v1,
1650 debugserver_vfp_v2,
  debugserver_vfp_v3,
  debugserver_vfp_v4,
  debugserver_vfp_v5,
  debugserver_vfp_v6,
  debugserver_vfp_v7,
  debugserver_vfp_v8,
  debugserver_vfp_v9,
  debugserver_vfp_v10,
  debugserver_vfp_v11,
  debugserver_vfp_v12,
  debugserver_vfp_v13,
  debugserver_vfp_v14,
  debugserver_vfp_v15,
  debugserver_vfp_v16,
  debugserver_vfp_v17,
  debugserver_vfp_v18,
  debugserver_vfp_v19,
  debugserver_vfp_v20,
  debugserver_vfp_v21,
  debugserver_vfp_v22,
  debugserver_vfp_v23,
  debugserver_vfp_v24,
  debugserver_vfp_v25,
  debugserver_vfp_v26,
  debugserver_vfp_v27,
  debugserver_vfp_v28,
  debugserver_vfp_v29,
  debugserver_vfp_v30,
  debugserver_vfp_v31,
  debugserver_vfp_fpsr,
  debugserver_vfp_fpcr
};

const char *g_contained_x0[]{"x0", NULL};
const char *g_contained_x1[]{"x1", NULL};
const char *g_contained_x2[]{"x2", NULL};
const char *g_contained_x3[]{"x3", NULL};
const char *g_contained_x4[]{"x4", NULL};
const char *g_contained_x5[]{"x5", NULL};
const char *g_contained_x6[]{"x6", NULL};
const char *g_contained_x7[]{"x7", NULL};
const char *g_contained_x8[]{"x8", NULL};
const char *g_contained_x9[]{"x9", NULL};
const char *g_contained_x10[]{"x10", NULL};
const char *g_contained_x11[]{"x11", NULL};
const char *g_contained_x12[]{"x12", NULL};
const char *g_contained_x13[]{"x13", NULL};
const char *g_contained_x14[]{"x14", NULL};
const char *g_contained_x15[]{"x15", NULL};
const char *g_contained_x16[]{"x16", NULL};
const char *g_contained_x17[]{"x17", NULL};
const char *g_contained_x18[]{"x18", NULL};
const char *g_contained_x19[]{"x19", NULL};
const char *g_contained_x20[]{"x20", NULL};
const char *g_contained_x21[]{"x21", NULL};
const char *g_contained_x22[]{"x22", NULL};
const char *g_contained_x23[]{"x23", NULL};
const char *g_contained_x24[]{"x24", NULL};
const char *g_contained_x25[]{"x25", NULL};
const char *g_contained_x26[]{"x26", NULL};
const char *g_contained_x27[]{"x27", NULL};
const char *g_contained_x28[]{"x28", NULL};

const char *g_invalidate_x0[]{"x0", "w0", NULL};
const char *g_invalidate_x1[]{"x1", "w1", NULL};
const char *g_invalidate_x2[]{"x2", "w2", NULL};
const char *g_invalidate_x3[]{"x3", "w3", NULL};
const char *g_invalidate_x4[]{"x4", "w4", NULL};
const char *g_invalidate_x5[]{"x5", "w5", NULL};
const char *g_invalidate_x6[]{"x6", "w6", NULL};
const char *g_invalidate_x7[]{"x7", "w7", NULL};
const char *g_invalidate_x8[]{"x8", "w8", NULL};
const char *g_invalidate_x9[]{"x9", "w9", NULL};
const char *g_invalidate_x10[]{"x10", "w10", NULL};
const char *g_invalidate_x11[]{"x11", "w11", NULL};
const char *g_invalidate_x12[]{"x12", "w12", NULL};
const char *g_invalidate_x13[]{"x13", "w13", NULL};
const char *g_invalidate_x14[]{"x14", "w14", NULL};
const char *g_invalidate_x15[]{"x15", "w15", NULL};
const char *g_invalidate_x16[]{"x16", "w16", NULL};
const char *g_invalidate_x17[]{"x17", "w17", NULL};
const char *g_invalidate_x18[]{"x18", "w18", NULL};
const char *g_invalidate_x19[]{"x19", "w19", NULL};
const char *g_invalidate_x20[]{"x20", "w20", NULL};
const char *g_invalidate_x21[]{"x21", "w21", NULL};
const char *g_invalidate_x22[]{"x22", "w22", NULL};
const char *g_invalidate_x23[]{"x23", "w23", NULL};
const char *g_invalidate_x24[]{"x24", "w24", NULL};
const char *g_invalidate_x25[]{"x25", "w25", NULL};
const char *g_invalidate_x26[]{"x26", "w26", NULL};
const char *g_invalidate_x27[]{"x27", "w27", NULL};
const char *g_invalidate_x28[]{"x28", "w28", NULL};

#define GPR_OFFSET_IDX(idx) (offsetof(DNBArchMachARM64::GPR, __x[idx]))

#define GPR_OFFSET_NAME(reg) (offsetof(DNBArchMachARM64::GPR, __##reg))

// These macros automatically define the register name, alt name, register
// size, register offset, encoding, format and native register numbers. This
// ensures that the register state structures are defined correctly and have
// the correct sizes and offsets.
#define DEFINE_GPR_IDX(idx, reg, alt, gen) \
  { \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_IDX(idx), \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, \
        g_invalidate_x##idx \
  }
#define DEFINE_GPR_NAME(reg, alt, gen) \
  { \
    e_regSetGPR, gpr_##reg, #reg, alt, Uint, Hex, 8, GPR_OFFSET_NAME(reg), \
        dwarf_##reg, dwarf_##reg, gen, debugserver_gpr_##reg, NULL, NULL \
  }
#define DEFINE_PSEUDO_GPR_IDX(idx, reg) \
  { \
    e_regSetGPR, gpr_##reg, #reg, NULL, Uint, Hex, 4, 0, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
        g_contained_x##idx, g_invalidate_x##idx \
  }
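
// For illustration only (roughly what the preprocessor produces, not literal
// source), DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1) expands to a
// DNBRegisterInfo initializer along the lines of:
//
//   { e_regSetGPR, gpr_x0, "x0", "arg1", Uint, Hex, 8, GPR_OFFSET_IDX(0),
//     dwarf_x0, dwarf_x0, GENERIC_REGNUM_ARG1, debugserver_gpr_x0, NULL,
//     g_invalidate_x0 }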

//_STRUCT_ARM_THREAD_STATE64
//{
//  uint64_t x[29];   /* General purpose registers x0-x28 */
//  uint64_t fp;      /* Frame pointer x29 */
//  uint64_t lr;      /* Link register x30 */
//  uint64_t sp;      /* Stack pointer x31 */
//  uint64_t pc;      /* Program counter */
//  uint32_t cpsr;    /* Current program status register */
//};

// General purpose registers
const DNBRegisterInfo DNBArchMachARM64::g_gpr_registers[] = {
    DEFINE_GPR_IDX(0, x0, "arg1", GENERIC_REGNUM_ARG1),
    DEFINE_GPR_IDX(1, x1, "arg2", GENERIC_REGNUM_ARG2),
    DEFINE_GPR_IDX(2, x2, "arg3", GENERIC_REGNUM_ARG3),
    DEFINE_GPR_IDX(3, x3, "arg4", GENERIC_REGNUM_ARG4),
    DEFINE_GPR_IDX(4, x4, "arg5", GENERIC_REGNUM_ARG5),
    DEFINE_GPR_IDX(5, x5, "arg6", GENERIC_REGNUM_ARG6),
    DEFINE_GPR_IDX(6, x6, "arg7", GENERIC_REGNUM_ARG7),
    DEFINE_GPR_IDX(7, x7, "arg8", GENERIC_REGNUM_ARG8),
    DEFINE_GPR_IDX(8, x8, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(9, x9, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(10, x10, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(11, x11, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(12, x12, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(13, x13, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(14, x14, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(15, x15, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(16, x16, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(17, x17, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(18, x18, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(19, x19, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(20, x20, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(21, x21, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(22, x22, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(23, x23, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(24, x24, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(25, x25, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(26, x26, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(27, x27, NULL, INVALID_NUB_REGNUM),
    DEFINE_GPR_IDX(28, x28, NULL, INVALID_NUB_REGNUM),
    // For the G/g packet we want to advertise where fp/lr/sp/pc live in the
    // register context, but on arm64e devices we cannot access those fields
    // directly (and therefore cannot offsetof() them). Instead, compute their
    // offsets by hand from the last directly accessible register (x28) when
    // advertising the layout to lldb. Actual reads and writes of these
    // registers go through the accessor functions.
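    // For example, fp is advertised at GPR_OFFSET_IDX(28) + 8, i.e. 8 bytes
    // past the start of __x[28], with lr, sp, and pc following at successive
    // 8-byte offsets.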
    {
        e_regSetGPR, gpr_fp, "fp", "x29", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 8,
        dwarf_fp, dwarf_fp, GENERIC_REGNUM_FP, debugserver_gpr_fp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_lr, "lr", "x30", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 16,
        dwarf_lr, dwarf_lr, GENERIC_REGNUM_RA, debugserver_gpr_lr, NULL, NULL
    },
    {
        e_regSetGPR, gpr_sp, "sp", "xsp", Uint, Hex, 8, GPR_OFFSET_IDX(28) + 24,
        dwarf_sp, dwarf_sp, GENERIC_REGNUM_SP, debugserver_gpr_sp, NULL, NULL
    },
    {
        e_regSetGPR, gpr_pc, "pc", NULL, Uint, Hex, 8, GPR_OFFSET_IDX(28) + 32,
        dwarf_pc, dwarf_pc, GENERIC_REGNUM_PC, debugserver_gpr_pc, NULL, NULL
    },

    // On armv7 we specify that writing to the CPSR should invalidate r8-r12,
    // sp, and lr. The same should be specified for arm64, even though
    // debugserver is only used for userland debugging.
    {e_regSetGPR, gpr_cpsr, "cpsr", "flags", Uint, Hex, 4,
     GPR_OFFSET_NAME(cpsr), dwarf_elr_mode, dwarf_elr_mode, GENERIC_REGNUM_FLAGS,
     debugserver_gpr_cpsr, NULL, NULL},

    DEFINE_PSEUDO_GPR_IDX(0, w0),
    DEFINE_PSEUDO_GPR_IDX(1, w1),
    DEFINE_PSEUDO_GPR_IDX(2, w2),
    DEFINE_PSEUDO_GPR_IDX(3, w3),
    DEFINE_PSEUDO_GPR_IDX(4, w4),
    DEFINE_PSEUDO_GPR_IDX(5, w5),
    DEFINE_PSEUDO_GPR_IDX(6, w6),
    DEFINE_PSEUDO_GPR_IDX(7, w7),
    DEFINE_PSEUDO_GPR_IDX(8, w8),
    DEFINE_PSEUDO_GPR_IDX(9, w9),
    DEFINE_PSEUDO_GPR_IDX(10, w10),
    DEFINE_PSEUDO_GPR_IDX(11, w11),
    DEFINE_PSEUDO_GPR_IDX(12, w12),
    DEFINE_PSEUDO_GPR_IDX(13, w13),
    DEFINE_PSEUDO_GPR_IDX(14, w14),
    DEFINE_PSEUDO_GPR_IDX(15, w15),
    DEFINE_PSEUDO_GPR_IDX(16, w16),
    DEFINE_PSEUDO_GPR_IDX(17, w17),
    DEFINE_PSEUDO_GPR_IDX(18, w18),
    DEFINE_PSEUDO_GPR_IDX(19, w19),
    DEFINE_PSEUDO_GPR_IDX(20, w20),
    DEFINE_PSEUDO_GPR_IDX(21, w21),
    DEFINE_PSEUDO_GPR_IDX(22, w22),
    DEFINE_PSEUDO_GPR_IDX(23, w23),
    DEFINE_PSEUDO_GPR_IDX(24, w24),
    DEFINE_PSEUDO_GPR_IDX(25, w25),
    DEFINE_PSEUDO_GPR_IDX(26, w26),
    DEFINE_PSEUDO_GPR_IDX(27, w27),
    DEFINE_PSEUDO_GPR_IDX(28, w28)};

const char *g_contained_v0[]{"v0", NULL};
const char *g_contained_v1[]{"v1", NULL};
const char *g_contained_v2[]{"v2", NULL};
const char *g_contained_v3[]{"v3", NULL};
const char *g_contained_v4[]{"v4", NULL};
const char *g_contained_v5[]{"v5", NULL};
const char *g_contained_v6[]{"v6", NULL};
const char *g_contained_v7[]{"v7", NULL};
const char *g_contained_v8[]{"v8", NULL};
const char *g_contained_v9[]{"v9", NULL};
const char *g_contained_v10[]{"v10", NULL};
const char *g_contained_v11[]{"v11", NULL};
const char *g_contained_v12[]{"v12", NULL};
const char *g_contained_v13[]{"v13", NULL};
const char *g_contained_v14[]{"v14", NULL};
const char *g_contained_v15[]{"v15", NULL};
const char *g_contained_v16[]{"v16", NULL};
const char *g_contained_v17[]{"v17", NULL};
const char *g_contained_v18[]{"v18", NULL};
const char *g_contained_v19[]{"v19", NULL};
const char *g_contained_v20[]{"v20", NULL};
const char *g_contained_v21[]{"v21", NULL};
const char *g_contained_v22[]{"v22", NULL};
const char *g_contained_v23[]{"v23", NULL};
const char *g_contained_v24[]{"v24", NULL};
const char *g_contained_v25[]{"v25", NULL};
const char *g_contained_v26[]{"v26", NULL};
const char *g_contained_v27[]{"v27", NULL};
const char *g_contained_v28[]{"v28", NULL};
const char *g_contained_v29[]{"v29", NULL};
const char *g_contained_v30[]{"v30", NULL};
const char *g_contained_v31[]{"v31", NULL};

const char *g_invalidate_v0[]{"v0", "d0", "s0", NULL};
const char *g_invalidate_v1[]{"v1", "d1", "s1", NULL};
const char *g_invalidate_v2[]{"v2", "d2", "s2", NULL};
const char *g_invalidate_v3[]{"v3", "d3", "s3", NULL};
const char *g_invalidate_v4[]{"v4", "d4", "s4", NULL};
const char *g_invalidate_v5[]{"v5", "d5", "s5", NULL};
const char *g_invalidate_v6[]{"v6", "d6", "s6", NULL};
const char *g_invalidate_v7[]{"v7", "d7", "s7", NULL};
const char *g_invalidate_v8[]{"v8", "d8", "s8", NULL};
const char *g_invalidate_v9[]{"v9", "d9", "s9", NULL};
const char *g_invalidate_v10[]{"v10", "d10", "s10", NULL};
const char *g_invalidate_v11[]{"v11", "d11", "s11", NULL};
const char *g_invalidate_v12[]{"v12", "d12", "s12", NULL};
const char *g_invalidate_v13[]{"v13", "d13", "s13", NULL};
const char *g_invalidate_v14[]{"v14", "d14", "s14", NULL};
const char *g_invalidate_v15[]{"v15", "d15", "s15", NULL};
const char *g_invalidate_v16[]{"v16", "d16", "s16", NULL};
const char *g_invalidate_v17[]{"v17", "d17", "s17", NULL};
const char *g_invalidate_v18[]{"v18", "d18", "s18", NULL};
const char *g_invalidate_v19[]{"v19", "d19", "s19", NULL};
const char *g_invalidate_v20[]{"v20", "d20", "s20", NULL};
const char *g_invalidate_v21[]{"v21", "d21", "s21", NULL};
const char *g_invalidate_v22[]{"v22", "d22", "s22", NULL};
const char *g_invalidate_v23[]{"v23", "d23", "s23", NULL};
const char *g_invalidate_v24[]{"v24", "d24", "s24", NULL};
const char *g_invalidate_v25[]{"v25", "d25", "s25", NULL};
const char *g_invalidate_v26[]{"v26", "d26", "s26", NULL};
const char *g_invalidate_v27[]{"v27", "d27", "s27", NULL};
const char *g_invalidate_v28[]{"v28", "d28", "s28", NULL};
const char *g_invalidate_v29[]{"v29", "d29", "s29", NULL};
const char *g_invalidate_v30[]{"v30", "d30", "s30", NULL};
const char *g_invalidate_v31[]{"v31", "d31", "s31", NULL};

#if defined(__arm64__) || defined(__aarch64__)
#define VFP_V_OFFSET_IDX(idx) \
  (offsetof(DNBArchMachARM64::FPU, __v) + (idx * 16) + \
   offsetof(DNBArchMachARM64::Context, vfp))
#else
#define VFP_V_OFFSET_IDX(idx) \
  (offsetof(DNBArchMachARM64::FPU, opaque) + (idx * 16) + \
   offsetof(DNBArchMachARM64::Context, vfp))
#endif
#define VFP_OFFSET_NAME(reg) \
  (offsetof(DNBArchMachARM64::FPU, reg) + \
   offsetof(DNBArchMachARM64::Context, vfp))
#define EXC_OFFSET(reg) \
  (offsetof(DNBArchMachARM64::EXC, reg) + \
   offsetof(DNBArchMachARM64::Context, exc))
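
// For example, VFP_V_OFFSET_IDX(1) resolves to the offset of the vfp state
// within the Context, plus the offset of the vector register storage within
// the FPU state, plus 16 bytes (one 128-bit register). The fpsr and fpcr
// registers are advertised immediately after __v[31], i.e. at
// VFP_V_OFFSET_IDX(32) + 0 and VFP_V_OFFSET_IDX(32) + 4.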

//#define FLOAT_FORMAT Float
#define DEFINE_VFP_V_IDX(idx) \
  { \
    e_regSetVFP, vfp_v##idx, "v" #idx, "q" #idx, Vector, VectorOfUInt8, 16, \
        VFP_V_OFFSET_IDX(idx), INVALID_NUB_REGNUM, dwarf_v##idx, \
        INVALID_NUB_REGNUM, debugserver_vfp_v##idx, NULL, g_invalidate_v##idx \
  }
#define DEFINE_PSEUDO_VFP_S_IDX(idx) \
  { \
    e_regSetVFP, vfp_s##idx, "s" #idx, NULL, IEEE754, Float, 4, 0, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx \
  }
#define DEFINE_PSEUDO_VFP_D_IDX(idx) \
  { \
    e_regSetVFP, vfp_d##idx, "d" #idx, NULL, IEEE754, Float, 8, 0, \
        INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, \
        INVALID_NUB_REGNUM, g_contained_v##idx, g_invalidate_v##idx \
  }

// Floating point registers
const DNBRegisterInfo DNBArchMachARM64::g_vfp_registers[] = {
    DEFINE_VFP_V_IDX(0),
    DEFINE_VFP_V_IDX(1),
    DEFINE_VFP_V_IDX(2),
    DEFINE_VFP_V_IDX(3),
    DEFINE_VFP_V_IDX(4),
    DEFINE_VFP_V_IDX(5),
    DEFINE_VFP_V_IDX(6),
    DEFINE_VFP_V_IDX(7),
    DEFINE_VFP_V_IDX(8),
    DEFINE_VFP_V_IDX(9),
    DEFINE_VFP_V_IDX(10),
    DEFINE_VFP_V_IDX(11),
    DEFINE_VFP_V_IDX(12),
    DEFINE_VFP_V_IDX(13),
    DEFINE_VFP_V_IDX(14),
    DEFINE_VFP_V_IDX(15),
    DEFINE_VFP_V_IDX(16),
    DEFINE_VFP_V_IDX(17),
    DEFINE_VFP_V_IDX(18),
    DEFINE_VFP_V_IDX(19),
    DEFINE_VFP_V_IDX(20),
    DEFINE_VFP_V_IDX(21),
    DEFINE_VFP_V_IDX(22),
    DEFINE_VFP_V_IDX(23),
    DEFINE_VFP_V_IDX(24),
    DEFINE_VFP_V_IDX(25),
    DEFINE_VFP_V_IDX(26),
    DEFINE_VFP_V_IDX(27),
    DEFINE_VFP_V_IDX(28),
    DEFINE_VFP_V_IDX(29),
    DEFINE_VFP_V_IDX(30),
    DEFINE_VFP_V_IDX(31),
    {e_regSetVFP, vfp_fpsr, "fpsr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 0, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetVFP, vfp_fpcr, "fpcr", NULL, Uint, Hex, 4,
     VFP_V_OFFSET_IDX(32) + 4, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL},

    DEFINE_PSEUDO_VFP_S_IDX(0),
    DEFINE_PSEUDO_VFP_S_IDX(1),
    DEFINE_PSEUDO_VFP_S_IDX(2),
    DEFINE_PSEUDO_VFP_S_IDX(3),
    DEFINE_PSEUDO_VFP_S_IDX(4),
    DEFINE_PSEUDO_VFP_S_IDX(5),
    DEFINE_PSEUDO_VFP_S_IDX(6),
    DEFINE_PSEUDO_VFP_S_IDX(7),
    DEFINE_PSEUDO_VFP_S_IDX(8),
    DEFINE_PSEUDO_VFP_S_IDX(9),
    DEFINE_PSEUDO_VFP_S_IDX(10),
    DEFINE_PSEUDO_VFP_S_IDX(11),
    DEFINE_PSEUDO_VFP_S_IDX(12),
    DEFINE_PSEUDO_VFP_S_IDX(13),
    DEFINE_PSEUDO_VFP_S_IDX(14),
    DEFINE_PSEUDO_VFP_S_IDX(15),
    DEFINE_PSEUDO_VFP_S_IDX(16),
    DEFINE_PSEUDO_VFP_S_IDX(17),
    DEFINE_PSEUDO_VFP_S_IDX(18),
    DEFINE_PSEUDO_VFP_S_IDX(19),
    DEFINE_PSEUDO_VFP_S_IDX(20),
    DEFINE_PSEUDO_VFP_S_IDX(21),
    DEFINE_PSEUDO_VFP_S_IDX(22),
    DEFINE_PSEUDO_VFP_S_IDX(23),
    DEFINE_PSEUDO_VFP_S_IDX(24),
    DEFINE_PSEUDO_VFP_S_IDX(25),
    DEFINE_PSEUDO_VFP_S_IDX(26),
    DEFINE_PSEUDO_VFP_S_IDX(27),
    DEFINE_PSEUDO_VFP_S_IDX(28),
    DEFINE_PSEUDO_VFP_S_IDX(29),
    DEFINE_PSEUDO_VFP_S_IDX(30),
    DEFINE_PSEUDO_VFP_S_IDX(31),

    DEFINE_PSEUDO_VFP_D_IDX(0),
    DEFINE_PSEUDO_VFP_D_IDX(1),
    DEFINE_PSEUDO_VFP_D_IDX(2),
    DEFINE_PSEUDO_VFP_D_IDX(3),
    DEFINE_PSEUDO_VFP_D_IDX(4),
    DEFINE_PSEUDO_VFP_D_IDX(5),
    DEFINE_PSEUDO_VFP_D_IDX(6),
    DEFINE_PSEUDO_VFP_D_IDX(7),
    DEFINE_PSEUDO_VFP_D_IDX(8),
    DEFINE_PSEUDO_VFP_D_IDX(9),
    DEFINE_PSEUDO_VFP_D_IDX(10),
    DEFINE_PSEUDO_VFP_D_IDX(11),
    DEFINE_PSEUDO_VFP_D_IDX(12),
    DEFINE_PSEUDO_VFP_D_IDX(13),
    DEFINE_PSEUDO_VFP_D_IDX(14),
    DEFINE_PSEUDO_VFP_D_IDX(15),
    DEFINE_PSEUDO_VFP_D_IDX(16),
    DEFINE_PSEUDO_VFP_D_IDX(17),
    DEFINE_PSEUDO_VFP_D_IDX(18),
    DEFINE_PSEUDO_VFP_D_IDX(19),
    DEFINE_PSEUDO_VFP_D_IDX(20),
    DEFINE_PSEUDO_VFP_D_IDX(21),
    DEFINE_PSEUDO_VFP_D_IDX(22),
    DEFINE_PSEUDO_VFP_D_IDX(23),
    DEFINE_PSEUDO_VFP_D_IDX(24),
    DEFINE_PSEUDO_VFP_D_IDX(25),
    DEFINE_PSEUDO_VFP_D_IDX(26),
    DEFINE_PSEUDO_VFP_D_IDX(27),
    DEFINE_PSEUDO_VFP_D_IDX(28),
    DEFINE_PSEUDO_VFP_D_IDX(29),
    DEFINE_PSEUDO_VFP_D_IDX(30),
    DEFINE_PSEUDO_VFP_D_IDX(31)

};

//_STRUCT_ARM_EXCEPTION_STATE64
//{
//  uint64_t far;       /* Virtual Fault Address */
//  uint32_t esr;       /* Exception syndrome */
//  uint32_t exception; /* number of arm exception taken */
//};

// Exception registers
const DNBRegisterInfo DNBArchMachARM64::g_exc_registers[] = {
    {e_regSetEXC, exc_far, "far", NULL, Uint, Hex, 8, EXC_OFFSET(__far),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_esr, "esr", NULL, Uint, Hex, 4, EXC_OFFSET(__esr),
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, NULL, NULL},
    {e_regSetEXC, exc_exception, "exception", NULL, Uint, Hex, 4,
     EXC_OFFSET(__exception), INVALID_NUB_REGNUM, INVALID_NUB_REGNUM,
     INVALID_NUB_REGNUM, INVALID_NUB_REGNUM, NULL, NULL}};

// Number of registers in each register set
const size_t DNBArchMachARM64::k_num_gpr_registers =
    sizeof(g_gpr_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_vfp_registers =
    sizeof(g_vfp_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_exc_registers =
    sizeof(g_exc_registers) / sizeof(DNBRegisterInfo);
const size_t DNBArchMachARM64::k_num_all_registers =
    k_num_gpr_registers + k_num_vfp_registers + k_num_exc_registers;

// Register set definitions. The first definition, at register set index
// zero, covers all registers and is followed by the other register sets.
// The register information for the "all registers" set need not be filled in.
const DNBRegisterSetInfo DNBArchMachARM64::g_reg_sets[] = {
    {"ARM64 Registers", NULL, k_num_all_registers},
    {"General Purpose Registers", g_gpr_registers, k_num_gpr_registers},
    {"Floating Point Registers", g_vfp_registers, k_num_vfp_registers},
    {"Exception State Registers", g_exc_registers, k_num_exc_registers}};
// Total number of register sets for this architecture
const size_t DNBArchMachARM64::k_num_register_sets =
    sizeof(g_reg_sets) / sizeof(DNBRegisterSetInfo);

const DNBRegisterSetInfo *
DNBArchMachARM64::GetRegisterSetInfo(nub_size_t *num_reg_sets) {
  *num_reg_sets = k_num_register_sets;
  return g_reg_sets;
}

bool DNBArchMachARM64::FixGenericRegisterNumber(uint32_t &set, uint32_t &reg) {
  if (set == REGISTER_SET_GENERIC) {
    switch (reg) {
    case GENERIC_REGNUM_PC: // Program Counter
      set = e_regSetGPR;
      reg = gpr_pc;
      break;

    case GENERIC_REGNUM_SP: // Stack Pointer
      set = e_regSetGPR;
      reg = gpr_sp;
      break;

    case GENERIC_REGNUM_FP: // Frame Pointer
      set = e_regSetGPR;
      reg = gpr_fp;
      break;

    case GENERIC_REGNUM_RA: // Return Address
      set = e_regSetGPR;
      reg = gpr_lr;
      break;

    case GENERIC_REGNUM_FLAGS: // Processor flags register
      set = e_regSetGPR;
      reg = gpr_cpsr;
      break;

    case GENERIC_REGNUM_ARG1:
    case GENERIC_REGNUM_ARG2:
    case GENERIC_REGNUM_ARG3:
    case GENERIC_REGNUM_ARG4:
    case GENERIC_REGNUM_ARG5:
    case GENERIC_REGNUM_ARG6:
      set = e_regSetGPR;
      reg = gpr_x0 + reg - GENERIC_REGNUM_ARG1;
      break;

    default:
      return false;
    }
  }
  return true;
}
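
// Example: FixGenericRegisterNumber() maps a (REGISTER_SET_GENERIC,
// GENERIC_REGNUM_ARG3) request to (e_regSetGPR, gpr_x2) in place before the
// register lookups below proceed.
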
bool DNBArchMachARM64::GetRegisterValue(uint32_t set, uint32_t reg,
                                        DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    value->info = *regInfo;
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
        switch (reg) {
#if __has_feature(ptrauth_calls) && defined(__LP64__)
        case gpr_pc:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_pc));
          break;
        case gpr_lr:
          value->value.uint64 = arm_thread_state64_get_lr(m_state.context.gpr);
          break;
        case gpr_sp:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_sp));
          break;
        case gpr_fp:
          value->value.uint64 = clear_pac_bits(
              reinterpret_cast<uint64_t>(m_state.context.gpr.__opaque_fp));
          break;
#else
        case gpr_pc:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__pc);
          break;
        case gpr_lr:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__lr);
          break;
        case gpr_sp:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__sp);
          break;
        case gpr_fp:
          value->value.uint64 = clear_pac_bits(m_state.context.gpr.__fp);
          break;
#endif
        default:
          value->value.uint64 = m_state.context.gpr.__x[reg];
        }
        return true;
      } else if (reg == gpr_cpsr) {
        value->value.uint32 = m_state.context.gpr.__cpsr;
        return true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_v0],
               16);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               16);
#endif
        return true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpsr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0, 4);
#endif
        return true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.uint32, &m_state.context.vfp.__fpcr, 4);
#else
        memcpy(&value->value.uint32,
               ((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 4, 4);
#endif
        return true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_s0],
               4);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               4);
#endif
        return true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&value->value.v_uint8, &m_state.context.vfp.__v[reg - vfp_d0],
               8);
#else
        memcpy(&value->value.v_uint8,
               ((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               8);
#endif
        return true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        value->value.uint64 = m_state.context.exc.__far;
        return true;
      } else if (reg == exc_esr) {
        value->value.uint32 = m_state.context.exc.__esr;
        return true;
      } else if (reg == exc_exception) {
        value->value.uint32 = m_state.context.exc.__exception;
        return true;
      }
      break;
    }
  }
  return false;
}
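
// Typical usage (an illustrative sketch only; the surrounding debugserver
// code drives these calls, and "arch" is a hypothetical pointer to this
// plug-in instance):
//
//   DNBRegisterValue value;
//   if (arch->GetRegisterValue(e_regSetGPR, gpr_pc, &value))
//     printf("pc = 0x%llx\n", (unsigned long long)value.value.uint64);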

bool DNBArchMachARM64::SetRegisterValue(uint32_t set, uint32_t reg,
                                        const DNBRegisterValue *value) {
  if (!FixGenericRegisterNumber(set, reg))
    return false;

  if (GetRegisterState(set, false) != KERN_SUCCESS)
    return false;

  bool success = false;
  const DNBRegisterInfo *regInfo = m_thread->GetRegisterInfo(set, reg);
  if (regInfo) {
    switch (set) {
    case e_regSetGPR:
      if (reg <= gpr_pc) {
#if defined(__LP64__)
        uint64_t signed_value = value->value.uint64;
#if __has_feature(ptrauth_calls)
        // The incoming value could be garbage. Strip it to avoid
        // trapping when it gets resigned in the thread state.
        signed_value = (uint64_t)ptrauth_strip((void *)signed_value,
                                               ptrauth_key_function_pointer);
        signed_value = (uint64_t)ptrauth_sign_unauthenticated(
            (void *)signed_value, ptrauth_key_function_pointer, 0);
#endif
        if (reg == gpr_pc)
          arm_thread_state64_set_pc_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_lr)
          arm_thread_state64_set_lr_fptr(m_state.context.gpr,
                                         (void *)signed_value);
        else if (reg == gpr_sp)
          arm_thread_state64_set_sp(m_state.context.gpr, value->value.uint64);
        else if (reg == gpr_fp)
          arm_thread_state64_set_fp(m_state.context.gpr, value->value.uint64);
        else
          m_state.context.gpr.__x[reg] = value->value.uint64;
#else
        m_state.context.gpr.__x[reg] = value->value.uint64;
#endif
        success = true;
      } else if (reg == gpr_cpsr) {
        m_state.context.gpr.__cpsr = value->value.uint32;
        success = true;
      }
      break;

    case e_regSetVFP:
      if (reg >= vfp_v0 && reg <= vfp_v31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_v0], &value->value.v_uint8,
               16);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_v0) * 16),
               &value->value.v_uint8, 16);
#endif
        success = true;
      } else if (reg == vfp_fpsr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpsr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + (32 * 16) + 0,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg == vfp_fpcr) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__fpcr, &value->value.uint32, 4);
#else
        memcpy(((uint8_t *)m_state.context.vfp.opaque) + (32 * 16) + 4,
               &value->value.uint32, 4);
#endif
        success = true;
      } else if (reg >= vfp_s0 && reg <= vfp_s31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_s0], &value->value.v_uint8,
               4);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_s0) * 16),
               &value->value.v_uint8, 4);
#endif
        success = true;
      } else if (reg >= vfp_d0 && reg <= vfp_d31) {
#if defined(__arm64__) || defined(__aarch64__)
        memcpy(&m_state.context.vfp.__v[reg - vfp_d0], &value->value.v_uint8,
               8);
#else
        memcpy(((uint8_t *)&m_state.context.vfp.opaque) + ((reg - vfp_d0) * 16),
               &value->value.v_uint8, 8);
#endif
        success = true;
      }
      break;

    case e_regSetEXC:
      if (reg == exc_far) {
        m_state.context.exc.__far = value->value.uint64;
        success = true;
      } else if (reg == exc_esr) {
        m_state.context.exc.__esr = value->value.uint32;
        success = true;
      } else if (reg == exc_exception) {
        m_state.context.exc.__exception = value->value.uint32;
        success = true;
      }
      break;
    }
  }
  if (success)
    return SetRegisterState(set) == KERN_SUCCESS;
  return false;
}

kern_return_t DNBArchMachARM64::GetRegisterState(int set, bool force) {
  switch (set) {
  case e_regSetALL:
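    // KERN_SUCCESS is 0, so OR-ing the individual results yields KERN_SUCCESS
    // only when every register set was fetched successfully.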
    return GetGPRState(force) | GetVFPState(force) | GetEXCState(force) |
           GetDBGState(force);
  case e_regSetGPR:
    return GetGPRState(force);
  case e_regSetVFP:
    return GetVFPState(force);
  case e_regSetEXC:
    return GetEXCState(force);
  case e_regSetDBG:
    return GetDBGState(force);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

kern_return_t DNBArchMachARM64::SetRegisterState(int set) {
  // Make sure we have a valid context to set.
  kern_return_t err = GetRegisterState(set, false);
  if (err != KERN_SUCCESS)
    return err;

  switch (set) {
  case e_regSetALL:
    return SetGPRState() | SetVFPState() | SetEXCState() | SetDBGState(false);
  case e_regSetGPR:
    return SetGPRState();
  case e_regSetVFP:
    return SetVFPState();
  case e_regSetEXC:
    return SetEXCState();
  case e_regSetDBG:
    return SetDBGState(false);
  default:
    break;
  }
  return KERN_INVALID_ARGUMENT;
}

bool DNBArchMachARM64::RegisterSetStateIsValid(int set) const {
  return m_state.RegsAreValid(set);
}

nub_size_t DNBArchMachARM64::GetRegisterContext(void *buf, nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf && buf_len) {
    if (size > buf_len)
      size = buf_len;

    bool force = false;
    if (GetGPRState(force) | GetVFPState(force) | GetEXCState(force))
      return 0;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = (uint8_t *)buf;
    ::memcpy(p, &m_state.context.gpr, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(p, &m_state.context.vfp, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(p, &m_state.context.exc, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - (uint8_t *)buf;
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::GetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  // Return the size of the register context even if NULL was passed in
  return size;
}
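
// Callers typically invoke GetRegisterContext(NULL, 0) first to learn the
// required buffer size, then call it again with a buffer at least that large.
// SetRegisterContext() below expects a buffer with the same layout: gpr, then
// vfp, then exc, with no padding between the structs.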

nub_size_t DNBArchMachARM64::SetRegisterContext(const void *buf,
                                                nub_size_t buf_len) {
  nub_size_t size = sizeof(m_state.context.gpr) + sizeof(m_state.context.vfp) +
                    sizeof(m_state.context.exc);

  if (buf == NULL || buf_len == 0)
    size = 0;

  if (size) {
    if (size > buf_len)
      size = buf_len;

    // Copy each struct individually to avoid any padding that might be between
    // the structs in m_state.context
    uint8_t *p = const_cast<uint8_t *>(reinterpret_cast<const uint8_t *>(buf));
    ::memcpy(&m_state.context.gpr, p, sizeof(m_state.context.gpr));
    p += sizeof(m_state.context.gpr);
    ::memcpy(&m_state.context.vfp, p, sizeof(m_state.context.vfp));
    p += sizeof(m_state.context.vfp);
    ::memcpy(&m_state.context.exc, p, sizeof(m_state.context.exc));
    p += sizeof(m_state.context.exc);

    size_t bytes_written = p - reinterpret_cast<const uint8_t *>(buf);
    UNUSED_IF_ASSERT_DISABLED(bytes_written);
    assert(bytes_written == size);
    SetGPRState();
    SetVFPState();
    SetEXCState();
  }
  DNBLogThreadedIf(
      LOG_THREAD,
      "DNBArchMachARM64::SetRegisterContext (buf = %p, len = %zu) => %zu", buf,
      buf_len, size);
  return size;
}

uint32_t DNBArchMachARM64::SaveRegisterState() {
  kern_return_t kret = ::thread_abort_safely(m_thread->MachPortNumber());
  DNBLogThreadedIf(
      LOG_THREAD,
      "thread = 0x%4.4x calling thread_abort_safely (tid) => %u "
      "(SetGPRState() for stop_count = %u)",
      m_thread->MachPortNumber(), kret, m_thread->Process()->StopCount());

  // Always re-read the registers because we called thread_abort_safely()
  // above.
  bool force = true;

  if ((kret = GetGPRState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: GPR regs failed to read: %u ",
                     kret);
  } else if ((kret = GetVFPState(force)) != KERN_SUCCESS) {
    DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::SaveRegisterState () "
                                 "error: %s regs failed to read: %u",
                     "VFP", kret);
  } else {
    const uint32_t save_id = GetNextRegisterStateSaveID();
    m_saved_register_states[save_id] = m_state.context;
    return save_id;
  }
  return UINT32_MAX;
}

bool DNBArchMachARM64::RestoreRegisterState(uint32_t save_id) {
  SaveRegisterStates::iterator pos = m_saved_register_states.find(save_id);
  if (pos != m_saved_register_states.end()) {
    m_state.context.gpr = pos->second.gpr;
    m_state.context.vfp = pos->second.vfp;
    kern_return_t kret;
    bool success = true;
    if ((kret = SetGPRState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: GPR regs failed to "
                                   "write: %u",
                       save_id, kret);
      success = false;
    } else if ((kret = SetVFPState()) != KERN_SUCCESS) {
      DNBLogThreadedIf(LOG_THREAD, "DNBArchMachARM64::RestoreRegisterState "
                                   "(save_id = %u) error: %s regs failed to "
                                   "write: %u",
                       save_id, "VFP", kret);
      success = false;
    }
    m_saved_register_states.erase(pos);
    return success;
  }
  return false;
}
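
// SaveRegisterState() and RestoreRegisterState() are used as a pair: the
// save_id returned by a successful save (UINT32_MAX on failure) is later
// handed to RestoreRegisterState(), which writes the saved GPR and VFP state
// back to the thread and then drops the saved copy.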

#endif // #if defined (ARM_THREAD_STATE64_COUNT)
#endif // #if defined (__arm__) || defined (__arm64__) || defined (__aarch64__)


source code of lldb/tools/debugserver/source/MacOSX/arm64/DNBArchImplARM64.cpp