//===-- ABISysV_x86_64.cpp ------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "ABISysV_x86_64.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/TargetParser/Triple.h"

#include "lldb/Core/Module.h"
#include "lldb/Core/PluginManager.h"
#include "lldb/Core/Value.h"
#include "lldb/Symbol/UnwindPlan.h"
#include "lldb/Target/Process.h"
#include "lldb/Target/RegisterContext.h"
#include "lldb/Target/StackFrame.h"
#include "lldb/Target/Target.h"
#include "lldb/Target/Thread.h"
#include "lldb/Utility/ConstString.h"
#include "lldb/Utility/DataExtractor.h"
#include "lldb/Utility/LLDBLog.h"
#include "lldb/Utility/Log.h"
#include "lldb/Utility/RegisterValue.h"
#include "lldb/Utility/Status.h"
#include "lldb/ValueObject/ValueObjectConstResult.h"
#include "lldb/ValueObject/ValueObjectMemory.h"
#include "lldb/ValueObject/ValueObjectRegister.h"

#include <optional>
#include <vector>

using namespace lldb;
using namespace lldb_private;

LLDB_PLUGIN_DEFINE(ABISysV_x86_64)

enum dwarf_regnums {
  dwarf_rax = 0,
  dwarf_rdx,
  dwarf_rcx,
  dwarf_rbx,
  dwarf_rsi,
  dwarf_rdi,
  dwarf_rbp,
  dwarf_rsp,
  dwarf_r8,
  dwarf_r9,
  dwarf_r10,
  dwarf_r11,
  dwarf_r12,
  dwarf_r13,
  dwarf_r14,
  dwarf_r15,
  dwarf_rip,
};
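// These values follow the DWARF register numbering defined in the x86-64
// SysV psABI (rax = 0, rdx = 1, rcx = 2, rbx = 3, ..., return address/rip =
// 16); note that rdx and rcx come before rbx, unlike the instruction
// encoding order.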

bool ABISysV_x86_64::GetPointerReturnRegister(const char *&name) {
  name = "rax";
  return true;
}

size_t ABISysV_x86_64::GetRedZoneSize() const { return 128; }
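// The SysV x86-64 ABI reserves a 128-byte "red zone" below %rsp that leaf
// functions may use without adjusting the stack pointer, so anything LLDB
// pushes onto the stack has to stay clear of it.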

// Static Functions

ABISP
ABISysV_x86_64::CreateInstance(lldb::ProcessSP process_sp,
                               const ArchSpec &arch) {
  const llvm::Triple::ArchType arch_type = arch.GetTriple().getArch();
  const llvm::Triple::OSType os_type = arch.GetTriple().getOS();
  const llvm::Triple::EnvironmentType os_env =
      arch.GetTriple().getEnvironment();
  if (arch_type == llvm::Triple::x86_64) {
    switch (os_type) {
    case llvm::Triple::OSType::IOS:
    case llvm::Triple::OSType::TvOS:
    case llvm::Triple::OSType::WatchOS:
    case llvm::Triple::OSType::XROS:
      switch (os_env) {
      case llvm::Triple::EnvironmentType::MacABI:
      case llvm::Triple::EnvironmentType::Simulator:
      case llvm::Triple::EnvironmentType::UnknownEnvironment:
        // UnknownEnvironment is needed for older compilers that don't
        // support the simulator environment.
        return ABISP(new ABISysV_x86_64(std::move(process_sp),
                                        MakeMCRegisterInfo(arch)));
      default:
        return ABISP();
      }
    case llvm::Triple::OSType::Darwin:
    case llvm::Triple::OSType::FreeBSD:
    case llvm::Triple::OSType::Linux:
    case llvm::Triple::OSType::MacOSX:
    case llvm::Triple::OSType::NetBSD:
    case llvm::Triple::OSType::OpenBSD:
    case llvm::Triple::OSType::Solaris:
    case llvm::Triple::OSType::UnknownOS:
      return ABISP(
          new ABISysV_x86_64(std::move(process_sp), MakeMCRegisterInfo(arch)));
    default:
      return ABISP();
    }
  }
  return ABISP();
}

bool ABISysV_x86_64::PrepareTrivialCall(Thread &thread, addr_t sp,
                                        addr_t func_addr, addr_t return_addr,
                                        llvm::ArrayRef<addr_t> args) const {
  Log *log = GetLog(LLDBLog::Expressions);

  if (log) {
    StreamString s;
    s.Printf("ABISysV_x86_64::PrepareTrivialCall (tid = 0x%" PRIx64
             ", sp = 0x%" PRIx64 ", func_addr = 0x%" PRIx64
             ", return_addr = 0x%" PRIx64,
             thread.GetID(), (uint64_t)sp, (uint64_t)func_addr,
             (uint64_t)return_addr);

    for (size_t i = 0; i < args.size(); ++i)
      s.Printf(", arg%" PRIu64 " = 0x%" PRIx64, static_cast<uint64_t>(i + 1),
               args[i]);
    s.PutCString(")");
    log->PutString(s.GetString());
  }

  RegisterContext *reg_ctx = thread.GetRegisterContext().get();
  if (!reg_ctx)
    return false;

  const RegisterInfo *reg_info = nullptr;

  if (args.size() > 6) // TODO handle more than 6 arguments
    return false;

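  // The SysV ABI passes the first six integer/pointer arguments in rdi, rsi,
  // rdx, rcx, r8 and r9; LLDB's generic ARG1..ARG6 register numbers map onto
  // exactly those registers for this ABI (see GetGenericNum below).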
  for (size_t i = 0; i < args.size(); ++i) {
    reg_info = reg_ctx->GetRegisterInfo(eRegisterKindGeneric,
                                        LLDB_REGNUM_GENERIC_ARG1 + i);
    LLDB_LOGF(log, "About to write arg%" PRIu64 " (0x%" PRIx64 ") into %s",
              static_cast<uint64_t>(i + 1), args[i], reg_info->name);
    if (!reg_ctx->WriteRegisterFromUnsigned(reg_info, args[i]))
      return false;
  }

  // First, align the SP

  LLDB_LOGF(log, "16-byte aligning SP: 0x%" PRIx64 " to 0x%" PRIx64,
            (uint64_t)sp, (uint64_t)(sp & ~0xfull));

  sp &= ~(0xfull); // 16-byte alignment

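  // Make room for the return address slot. Once the return address is written
  // below, the stack looks exactly as it would immediately after a "call"
  // instruction, which is what the callee expects (rsp % 16 == 8 at the first
  // instruction of the called function).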
  sp -= 8;

  Status error;
  const RegisterInfo *pc_reg_info =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_PC);
  const RegisterInfo *sp_reg_info =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_SP);
  ProcessSP process_sp(thread.GetProcess());

  RegisterValue reg_value;
  LLDB_LOGF(log,
            "Pushing the return address onto the stack: 0x%" PRIx64
            ": 0x%" PRIx64,
            (uint64_t)sp, (uint64_t)return_addr);

  // Save return address onto the stack
  if (!process_sp->WritePointerToMemory(sp, return_addr, error))
    return false;

  // %rsp is set to the actual stack value.

  LLDB_LOGF(log, "Writing SP: 0x%" PRIx64, (uint64_t)sp);

  if (!reg_ctx->WriteRegisterFromUnsigned(sp_reg_info, sp))
    return false;

  // %rip is set to the address of the called function.

  LLDB_LOGF(log, "Writing IP: 0x%" PRIx64, (uint64_t)func_addr);

  if (!reg_ctx->WriteRegisterFromUnsigned(pc_reg_info, func_addr))
    return false;

  return true;
}

static bool ReadIntegerArgument(Scalar &scalar, unsigned int bit_width,
                                bool is_signed, Thread &thread,
                                uint32_t *argument_register_ids,
                                unsigned int &current_argument_register,
                                addr_t &current_stack_argument) {
  if (bit_width > 64)
    return false; // Scalar can't hold large integer arguments

  if (current_argument_register < 6) {
    scalar = thread.GetRegisterContext()->ReadRegisterAsUnsigned(
        argument_register_ids[current_argument_register], 0);
    current_argument_register++;
    if (is_signed)
      scalar.SignExtend(bit_width);
  } else {
    uint32_t byte_size = (bit_width + (8 - 1)) / 8;
    Status error;
    if (thread.GetProcess()->ReadScalarIntegerFromMemory(
            current_stack_argument, byte_size, is_signed, scalar, error)) {
      current_stack_argument += byte_size;
      return true;
    }
    return false;
  }
  return true;
}

bool ABISysV_x86_64::GetArgumentValues(Thread &thread,
                                       ValueList &values) const {
  unsigned int num_values = values.GetSize();
  unsigned int value_index;

  // Extract the register context so we can read arguments from registers

  RegisterContext *reg_ctx = thread.GetRegisterContext().get();

  if (!reg_ctx)
    return false;

  // Get the pointer to the first stack argument so we have a place to start
  // when reading data

  addr_t sp = reg_ctx->GetSP(0);

  if (!sp)
    return false;

  addr_t current_stack_argument = sp + 8; // jump over return address

  uint32_t argument_register_ids[6];

  argument_register_ids[0] =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_ARG1)
          ->kinds[eRegisterKindLLDB];
  argument_register_ids[1] =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_ARG2)
          ->kinds[eRegisterKindLLDB];
  argument_register_ids[2] =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_ARG3)
          ->kinds[eRegisterKindLLDB];
  argument_register_ids[3] =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_ARG4)
          ->kinds[eRegisterKindLLDB];
  argument_register_ids[4] =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_ARG5)
          ->kinds[eRegisterKindLLDB];
  argument_register_ids[5] =
      reg_ctx->GetRegisterInfo(eRegisterKindGeneric, LLDB_REGNUM_GENERIC_ARG6)
          ->kinds[eRegisterKindLLDB];

  unsigned int current_argument_register = 0;

  for (value_index = 0; value_index < num_values; ++value_index) {
    Value *value = values.GetValueAtIndex(value_index);

    if (!value)
      return false;

    // We currently only support extracting values with Clang QualTypes. Do we
    // care about others?
    CompilerType compiler_type = value->GetCompilerType();
    std::optional<uint64_t> bit_size =
        llvm::expectedToOptional(compiler_type.GetBitSize(&thread));
    if (!bit_size)
      return false;
    bool is_signed;

    if (compiler_type.IsIntegerOrEnumerationType(is_signed)) {
      ReadIntegerArgument(value->GetScalar(), *bit_size, is_signed, thread,
                          argument_register_ids, current_argument_register,
                          current_stack_argument);
    } else if (compiler_type.IsPointerType()) {
      ReadIntegerArgument(value->GetScalar(), *bit_size, false, thread,
                          argument_register_ids, current_argument_register,
                          current_stack_argument);
    }
  }

  return true;
}

Status ABISysV_x86_64::SetReturnValueObject(lldb::StackFrameSP &frame_sp,
                                            lldb::ValueObjectSP &new_value_sp) {
  Status error;
  if (!new_value_sp) {
    error = Status::FromErrorString("Empty value object for return value.");
    return error;
  }

  CompilerType compiler_type = new_value_sp->GetCompilerType();
  if (!compiler_type) {
    error = Status::FromErrorString("Null clang type for return value.");
    return error;
  }

  Thread *thread = frame_sp->GetThread().get();

  bool is_signed;
  uint32_t count;
  bool is_complex;

  RegisterContext *reg_ctx = thread->GetRegisterContext().get();

  bool set_it_simple = false;
  if (compiler_type.IsIntegerOrEnumerationType(is_signed) ||
      compiler_type.IsPointerType()) {
    const RegisterInfo *reg_info = reg_ctx->GetRegisterInfoByName("rax", 0);

    DataExtractor data;
    Status data_error;
    size_t num_bytes = new_value_sp->GetData(data, data_error);
    if (data_error.Fail()) {
      error = Status::FromErrorStringWithFormat(
          "Couldn't convert return value to raw data: %s",
          data_error.AsCString());
      return error;
    }
    lldb::offset_t offset = 0;
    if (num_bytes <= 8) {
      uint64_t raw_value = data.GetMaxU64(&offset, num_bytes);

      if (reg_ctx->WriteRegisterFromUnsigned(reg_info, raw_value))
        set_it_simple = true;
    } else {
      error = Status::FromErrorString(
          "We don't support returning longer than 64 bit "
          "integer values at present.");
    }
  } else if (compiler_type.IsFloatingPointType(count, is_complex)) {
    if (is_complex)
      error = Status::FromErrorString(
          "We don't support returning complex values at present");
    else {
      std::optional<uint64_t> bit_width =
          llvm::expectedToOptional(compiler_type.GetBitSize(frame_sp.get()));
      if (!bit_width) {
        error = Status::FromErrorString("can't get type size");
        return error;
      }
      if (*bit_width <= 64) {
        const RegisterInfo *xmm0_info =
            reg_ctx->GetRegisterInfoByName("xmm0", 0);
        RegisterValue xmm0_value;
        DataExtractor data;
        Status data_error;
        size_t num_bytes = new_value_sp->GetData(data, data_error);
        if (data_error.Fail()) {
          error = Status::FromErrorStringWithFormat(
              "Couldn't convert return value to raw data: %s",
              data_error.AsCString());
          return error;
        }

        unsigned char buffer[16];
        ByteOrder byte_order = data.GetByteOrder();

        data.CopyByteOrderedData(0, num_bytes, buffer, 16, byte_order);
        xmm0_value.SetBytes(buffer, 16, byte_order);
        reg_ctx->WriteRegister(xmm0_info, xmm0_value);
        set_it_simple = true;
      } else {
        // FIXME - don't know how to do 80 bit long doubles yet.
        error = Status::FromErrorString(
            "We don't support returning float values > 64 bits at present");
      }
    }
  }

  if (!set_it_simple) {
    // Okay we've got a structure or something that doesn't fit in a simple
    // register. We should figure out where it really goes, but we don't
    // support this yet.
    error = Status::FromErrorString(
        "We only support setting simple integer and float "
        "return types at present.");
  }

  return error;
}

ValueObjectSP ABISysV_x86_64::GetReturnValueObjectSimple(
    Thread &thread, CompilerType &return_compiler_type) const {
  ValueObjectSP return_valobj_sp;
  Value value;

  if (!return_compiler_type)
    return return_valobj_sp;

  // value.SetContext (Value::eContextTypeClangType, return_value_type);
  value.SetCompilerType(return_compiler_type);

  RegisterContext *reg_ctx = thread.GetRegisterContext().get();
  if (!reg_ctx)
    return return_valobj_sp;

  const uint32_t type_flags = return_compiler_type.GetTypeInfo();
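  // Per the SysV ABI, integer and pointer results come back in rax, floating
  // point results in xmm0, and small vector results in xmm0 (or split across
  // xmm0/xmm1); each case below picks the right register and reinterprets its
  // bytes as the declared return type.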
  if (type_flags & eTypeIsScalar) {
    value.SetValueType(Value::ValueType::Scalar);

    bool success = false;
    if (type_flags & eTypeIsInteger) {
      // Extract the register context so we can read arguments from registers

      std::optional<uint64_t> byte_size =
          llvm::expectedToOptional(return_compiler_type.GetByteSize(&thread));
      if (!byte_size)
        return return_valobj_sp;
      uint64_t raw_value = thread.GetRegisterContext()->ReadRegisterAsUnsigned(
          reg_ctx->GetRegisterInfoByName("rax", 0), 0);
      const bool is_signed = (type_flags & eTypeIsSigned) != 0;
      switch (*byte_size) {
      default:
        break;

      case sizeof(uint64_t):
        if (is_signed)
          value.GetScalar() = (int64_t)(raw_value);
        else
          value.GetScalar() = (uint64_t)(raw_value);
        success = true;
        break;

      case sizeof(uint32_t):
        if (is_signed)
          value.GetScalar() = (int32_t)(raw_value & UINT32_MAX);
        else
          value.GetScalar() = (uint32_t)(raw_value & UINT32_MAX);
        success = true;
        break;

      case sizeof(uint16_t):
        if (is_signed)
          value.GetScalar() = (int16_t)(raw_value & UINT16_MAX);
        else
          value.GetScalar() = (uint16_t)(raw_value & UINT16_MAX);
        success = true;
        break;

      case sizeof(uint8_t):
        if (is_signed)
          value.GetScalar() = (int8_t)(raw_value & UINT8_MAX);
        else
          value.GetScalar() = (uint8_t)(raw_value & UINT8_MAX);
        success = true;
        break;
      }
    } else if (type_flags & eTypeIsFloat) {
      if (type_flags & eTypeIsComplex) {
        // Don't handle complex yet.
      } else {
        std::optional<uint64_t> byte_size =
            llvm::expectedToOptional(return_compiler_type.GetByteSize(&thread));
        if (byte_size && *byte_size <= sizeof(long double)) {
          const RegisterInfo *xmm0_info =
              reg_ctx->GetRegisterInfoByName("xmm0", 0);
          RegisterValue xmm0_value;
          if (reg_ctx->ReadRegister(xmm0_info, xmm0_value)) {
            DataExtractor data;
            if (xmm0_value.GetData(data)) {
              lldb::offset_t offset = 0;
              if (*byte_size == sizeof(float)) {
                value.GetScalar() = (float)data.GetFloat(&offset);
                success = true;
              } else if (*byte_size == sizeof(double)) {
                value.GetScalar() = (double)data.GetDouble(&offset);
                success = true;
              } else if (*byte_size == sizeof(long double)) {
                // Don't handle long double since that can be encoded as 80 bit
                // floats...
              }
            }
          }
        }
      }
    }

    if (success)
      return_valobj_sp = ValueObjectConstResult::Create(
          thread.GetStackFrameAtIndex(0).get(), value, ConstString(""));
  } else if (type_flags & eTypeIsPointer) {
    unsigned rax_id =
        reg_ctx->GetRegisterInfoByName("rax", 0)->kinds[eRegisterKindLLDB];
    value.GetScalar() =
        (uint64_t)thread.GetRegisterContext()->ReadRegisterAsUnsigned(rax_id,
                                                                      0);
    value.SetValueType(Value::ValueType::Scalar);
    return_valobj_sp = ValueObjectConstResult::Create(
        thread.GetStackFrameAtIndex(0).get(), value, ConstString(""));
  } else if (type_flags & eTypeIsVector) {
    std::optional<uint64_t> byte_size =
        llvm::expectedToOptional(return_compiler_type.GetByteSize(&thread));
    if (byte_size && *byte_size > 0) {
      const RegisterInfo *altivec_reg =
          reg_ctx->GetRegisterInfoByName("xmm0", 0);
      if (altivec_reg == nullptr)
        altivec_reg = reg_ctx->GetRegisterInfoByName("mm0", 0);

      if (altivec_reg) {
        if (*byte_size <= altivec_reg->byte_size) {
          ProcessSP process_sp(thread.GetProcess());
          if (process_sp) {
            std::unique_ptr<DataBufferHeap> heap_data_up(
                new DataBufferHeap(*byte_size, 0));
            const ByteOrder byte_order = process_sp->GetByteOrder();
            RegisterValue reg_value;
            if (reg_ctx->ReadRegister(altivec_reg, reg_value)) {
              Status error;
              if (reg_value.GetAsMemoryData(
                      *altivec_reg, heap_data_up->GetBytes(),
                      heap_data_up->GetByteSize(), byte_order, error)) {
                DataExtractor data(DataBufferSP(heap_data_up.release()),
                                   byte_order,
                                   process_sp->GetTarget()
                                       .GetArchitecture()
                                       .GetAddressByteSize());
                return_valobj_sp = ValueObjectConstResult::Create(
                    &thread, return_compiler_type, ConstString(""), data);
              }
            }
          }
        } else if (*byte_size <= altivec_reg->byte_size * 2) {
          const RegisterInfo *altivec_reg2 =
              reg_ctx->GetRegisterInfoByName("xmm1", 0);
          if (altivec_reg2) {
            ProcessSP process_sp(thread.GetProcess());
            if (process_sp) {
              std::unique_ptr<DataBufferHeap> heap_data_up(
                  new DataBufferHeap(*byte_size, 0));
              const ByteOrder byte_order = process_sp->GetByteOrder();
              RegisterValue reg_value;
              RegisterValue reg_value2;
              if (reg_ctx->ReadRegister(altivec_reg, reg_value) &&
                  reg_ctx->ReadRegister(altivec_reg2, reg_value2)) {

                Status error;
                if (reg_value.GetAsMemoryData(
                        *altivec_reg, heap_data_up->GetBytes(),
                        altivec_reg->byte_size, byte_order, error) &&
                    reg_value2.GetAsMemoryData(
                        *altivec_reg2,
                        heap_data_up->GetBytes() + altivec_reg->byte_size,
                        heap_data_up->GetByteSize() - altivec_reg->byte_size,
                        byte_order, error)) {
                  DataExtractor data(DataBufferSP(heap_data_up.release()),
                                     byte_order,
                                     process_sp->GetTarget()
                                         .GetArchitecture()
                                         .GetAddressByteSize());
                  return_valobj_sp = ValueObjectConstResult::Create(
                      &thread, return_compiler_type, ConstString(""), data);
                }
              }
            }
          }
        }
      }
    }
  }

  return return_valobj_sp;
}

// The compiler will flatten a nested aggregate type into a single layer and
// push the value to the stack.
// This helper function flattens an aggregate type the same way and returns
// true if it can be returned in register(s) by value, or false if the
// aggregate is returned in memory.
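// For example, a struct { struct { int a; float b; } inner; double c; }
// flattens to field offsets {0, 4, 8} with field types {int, float, double}
// (assuming the usual 4-byte int/float and 8-byte double layout).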
static bool FlattenAggregateType(
    Thread &thread, ExecutionContext &exe_ctx,
    CompilerType &return_compiler_type, uint32_t data_byte_offset,
    std::vector<uint32_t> &aggregate_field_offsets,
    std::vector<CompilerType> &aggregate_compiler_types) {

  const uint32_t num_children = return_compiler_type.GetNumFields();
  for (uint32_t idx = 0; idx < num_children; ++idx) {
    std::string name;
    bool is_signed;
    uint32_t count;
    bool is_complex;

    uint64_t field_bit_offset = 0;
    CompilerType field_compiler_type = return_compiler_type.GetFieldAtIndex(
        idx, name, &field_bit_offset, nullptr, nullptr);
    std::optional<uint64_t> field_bit_width =
        llvm::expectedToOptional(field_compiler_type.GetBitSize(&thread));

    // if we don't know the size of the field (e.g. invalid type), exit
    if (!field_bit_width || *field_bit_width == 0) {
      return false;
    }

    uint32_t field_byte_offset = field_bit_offset / 8 + data_byte_offset;

    const uint32_t field_type_flags = field_compiler_type.GetTypeInfo();
    if (field_compiler_type.IsIntegerOrEnumerationType(is_signed) ||
        field_compiler_type.IsPointerType() ||
        field_compiler_type.IsFloatingPointType(count, is_complex)) {
      aggregate_field_offsets.push_back(field_byte_offset);
      aggregate_compiler_types.push_back(field_compiler_type);
    } else if (field_type_flags & eTypeHasChildren) {
      if (!FlattenAggregateType(thread, exe_ctx, field_compiler_type,
                                field_byte_offset, aggregate_field_offsets,
                                aggregate_compiler_types)) {
        return false;
      }
    }
  }
  return true;
}

ValueObjectSP ABISysV_x86_64::GetReturnValueObjectImpl(
    Thread &thread, CompilerType &return_compiler_type) const {
  ValueObjectSP return_valobj_sp;

  if (!return_compiler_type)
    return return_valobj_sp;

  ExecutionContext exe_ctx(thread.shared_from_this());
  return_valobj_sp = GetReturnValueObjectSimple(thread, return_compiler_type);
  if (return_valobj_sp)
    return return_valobj_sp;

  RegisterContextSP reg_ctx_sp = thread.GetRegisterContext();
  if (!reg_ctx_sp)
    return return_valobj_sp;

  std::optional<uint64_t> bit_width =
      llvm::expectedToOptional(return_compiler_type.GetBitSize(&thread));
  if (!bit_width)
    return return_valobj_sp;
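  // Per the SysV psABI, an aggregate of at most 16 bytes (two "eightbytes")
  // that can be classified into INTEGER/SSE classes is returned in up to two
  // registers: rax/rdx for the integer parts and xmm0/xmm1 for the floating
  // point parts. Anything larger, or anything containing a long double, is
  // returned in memory; the caller passes the address of the result storage
  // in rdi and the callee hands it back in rax.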
  if (return_compiler_type.IsAggregateType()) {
    Target *target = exe_ctx.GetTargetPtr();
    bool is_memory = true;
    std::vector<uint32_t> aggregate_field_offsets;
    std::vector<CompilerType> aggregate_compiler_types;
    auto ts = return_compiler_type.GetTypeSystem();
    if (ts && ts->CanPassInRegisters(return_compiler_type) &&
        *bit_width <= 128 &&
        FlattenAggregateType(thread, exe_ctx, return_compiler_type, 0,
                             aggregate_field_offsets,
                             aggregate_compiler_types)) {
      ByteOrder byte_order = target->GetArchitecture().GetByteOrder();
      WritableDataBufferSP data_sp(new DataBufferHeap(16, 0));
      DataExtractor return_ext(data_sp, byte_order,
                               target->GetArchitecture().GetAddressByteSize());

      const RegisterInfo *rax_info =
          reg_ctx_sp->GetRegisterInfoByName("rax", 0);
      const RegisterInfo *rdx_info =
          reg_ctx_sp->GetRegisterInfoByName("rdx", 0);
      const RegisterInfo *xmm0_info =
          reg_ctx_sp->GetRegisterInfoByName("xmm0", 0);
      const RegisterInfo *xmm1_info =
          reg_ctx_sp->GetRegisterInfoByName("xmm1", 0);

      RegisterValue rax_value, rdx_value, xmm0_value, xmm1_value;
      reg_ctx_sp->ReadRegister(rax_info, rax_value);
      reg_ctx_sp->ReadRegister(rdx_info, rdx_value);
      reg_ctx_sp->ReadRegister(xmm0_info, xmm0_value);
      reg_ctx_sp->ReadRegister(xmm1_info, xmm1_value);

      DataExtractor rax_data, rdx_data, xmm0_data, xmm1_data;

      rax_value.GetData(rax_data);
      rdx_value.GetData(rdx_data);
      xmm0_value.GetData(xmm0_data);
      xmm1_value.GetData(xmm1_data);

      uint32_t fp_bytes =
          0; // Tracks how much of the xmm registers we've consumed so far
      uint32_t integer_bytes =
          0; // Tracks how much of the rax/rdx registers we've consumed so far

      // If the returned type is a subclass of a non-abstract base class, the
      // fields start at an offset that skips over the base class contents.
      if (aggregate_field_offsets.size()) {
        fp_bytes = aggregate_field_offsets[0];
        integer_bytes = aggregate_field_offsets[0];
      }

      const uint32_t num_children = aggregate_compiler_types.size();

      // Since we are in the small struct regime, assume we are not in memory.
      is_memory = false;
      for (uint32_t idx = 0; idx < num_children; idx++) {
        bool is_signed;
        uint32_t count;
        bool is_complex;

        CompilerType field_compiler_type = aggregate_compiler_types[idx];
        uint32_t field_byte_width =
            (uint32_t)(llvm::expectedToOptional(
                           field_compiler_type.GetByteSize(&thread))
                           .value_or(0));
        uint32_t field_byte_offset = aggregate_field_offsets[idx];

        uint32_t field_bit_width = field_byte_width * 8;

        DataExtractor *copy_from_extractor = nullptr;
        uint32_t copy_from_offset = 0;

        if (field_compiler_type.IsIntegerOrEnumerationType(is_signed) ||
            field_compiler_type.IsPointerType()) {
          if (integer_bytes < 8) {
            if (integer_bytes + field_byte_width <= 8) {
              // This is in RAX, copy from register to our result structure:
              copy_from_extractor = &rax_data;
              copy_from_offset = integer_bytes;
              integer_bytes += field_byte_width;
            } else {
              // The next field wouldn't fit in the remaining space, so we
              // pushed it to rdx.
              copy_from_extractor = &rdx_data;
              copy_from_offset = 0;
              integer_bytes = 8 + field_byte_width;
            }
          } else if (integer_bytes + field_byte_width <= 16) {
            copy_from_extractor = &rdx_data;
            copy_from_offset = integer_bytes - 8;
            integer_bytes += field_byte_width;
          } else {
            // The last field didn't fit. I can't see how that would happen
            // w/o the overall size being greater than 16 bytes. For now,
            // return a nullptr return value object.
            return return_valobj_sp;
          }
        } else if (field_compiler_type.IsFloatingPointType(count,
                                                           is_complex)) {
          // Structs with long doubles are always passed in memory.
          if (field_bit_width == 128) {
            is_memory = true;
            break;
          } else if (field_bit_width == 64) {
            // These have to be in a single xmm register.
            if (fp_bytes == 0)
              copy_from_extractor = &xmm0_data;
            else
              copy_from_extractor = &xmm1_data;

            copy_from_offset = 0;
            fp_bytes += field_byte_width;
          } else if (field_bit_width == 32) {
            // This one is kind of complicated. If we are in an "eightbyte"
            // with another float, we'll be stuffed into an xmm register with
            // it. If we are in an "eightbyte" with one or more ints, then we
            // will be stuffed into the appropriate GPR with them.
            bool in_gpr;
            if (field_byte_offset % 8 == 0) {
              // We are at the beginning of one of the eightbytes, so check the
              // next element (if any)
              if (idx == num_children - 1) {
                in_gpr = false;
              } else {
                CompilerType next_field_compiler_type =
                    aggregate_compiler_types[idx + 1];
                if (next_field_compiler_type.IsIntegerOrEnumerationType(
                        is_signed)) {
                  in_gpr = true;
                } else {
                  copy_from_offset = 0;
                  in_gpr = false;
                }
              }
            } else if (field_byte_offset % 4 == 0) {
              // We are inside of an eightbyte, so see if the field before us
              // is floating point: This could happen if somebody put padding
              // in the structure.
              if (idx == 0) {
                in_gpr = false;
              } else {
                CompilerType prev_field_compiler_type =
                    aggregate_compiler_types[idx - 1];
                if (prev_field_compiler_type.IsIntegerOrEnumerationType(
                        is_signed)) {
                  in_gpr = true;
                } else {
                  copy_from_offset = 4;
                  in_gpr = false;
                }
              }
            } else {
              is_memory = true;
              continue;
            }

            // Okay, we've figured out whether we are in GPR or XMM, now figure
            // out which one.
            if (in_gpr) {
              if (integer_bytes < 8) {
                // This is in RAX, copy from register to our result structure:
                copy_from_extractor = &rax_data;
                copy_from_offset = integer_bytes;
                integer_bytes += field_byte_width;
              } else {
                copy_from_extractor = &rdx_data;
                copy_from_offset = integer_bytes - 8;
                integer_bytes += field_byte_width;
              }
            } else {
              if (fp_bytes < 8)
                copy_from_extractor = &xmm0_data;
              else
                copy_from_extractor = &xmm1_data;

              fp_bytes += field_byte_width;
            }
          }
        }
        // These two tests are just sanity checks. If I somehow get the type
        // calculation wrong above it is better to just return nothing than to
        // assert or crash.
        if (!copy_from_extractor)
          return return_valobj_sp;
        if (copy_from_offset + field_byte_width >
            copy_from_extractor->GetByteSize())
          return return_valobj_sp;
        copy_from_extractor->CopyByteOrderedData(
            copy_from_offset, field_byte_width,
            data_sp->GetBytes() + field_byte_offset, field_byte_width,
            byte_order);
      }
      if (!is_memory) {
        // The result is in our data buffer. Let's make a variable object out
        // of it:
        return_valobj_sp = ValueObjectConstResult::Create(
            &thread, return_compiler_type, ConstString(""), return_ext);
      }
    }

    // FIXME: This is just taking a guess, rax may very well no longer hold the
    // return storage location.
    // If we are going to do this right, when we make a new frame we should
    // check to see if it uses a memory return, and if we are at the first
    // instruction and if so stash away the return location. Then we would
    // only return the memory return value if we know it is valid.

    if (is_memory) {
      unsigned rax_id =
          reg_ctx_sp->GetRegisterInfoByName("rax", 0)->kinds[eRegisterKindLLDB];
      lldb::addr_t storage_addr =
          (uint64_t)thread.GetRegisterContext()->ReadRegisterAsUnsigned(rax_id,
                                                                        0);
      return_valobj_sp = ValueObjectMemory::Create(
          &thread, "", Address(storage_addr, nullptr), return_compiler_type);
    }
  }

  return return_valobj_sp;
}

// This defines the CFA as rsp+8
// The saved pc is at CFA-8 (i.e. rsp+0)
// The saved rsp is CFA+0
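// (At the first instruction of a function, the only thing the call has pushed
// is the 8-byte return address, so the CFA, i.e. the caller's rsp just before
// the call, is the current rsp plus 8.)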

UnwindPlanSP ABISysV_x86_64::CreateFunctionEntryUnwindPlan() {
  uint32_t sp_reg_num = dwarf_rsp;
  uint32_t pc_reg_num = dwarf_rip;

  UnwindPlan::Row row;
  row.GetCFAValue().SetIsRegisterPlusOffset(sp_reg_num, 8);
  row.SetRegisterLocationToAtCFAPlusOffset(pc_reg_num, -8, false);
  row.SetRegisterLocationToIsCFAPlusOffset(sp_reg_num, 0, true);

  auto plan_sp = std::make_shared<UnwindPlan>(eRegisterKindDWARF);
  plan_sp->AppendRow(std::move(row));
  plan_sp->SetSourceName("x86_64 at-func-entry default");
  plan_sp->SetSourcedFromCompiler(eLazyBoolNo);
  return plan_sp;
}

// This defines the CFA as rbp+16
// The saved pc is at CFA-8 (i.e. rbp+8)
// The saved rbp is at CFA-16 (i.e. rbp+0)
// The saved rsp is CFA+0

UnwindPlanSP ABISysV_x86_64::CreateDefaultUnwindPlan() {
  uint32_t fp_reg_num = dwarf_rbp;
  uint32_t sp_reg_num = dwarf_rsp;
  uint32_t pc_reg_num = dwarf_rip;

  UnwindPlan::Row row;

  const int32_t ptr_size = 8;
  row.GetCFAValue().SetIsRegisterPlusOffset(dwarf_rbp, 2 * ptr_size);
  row.SetOffset(0);
  row.SetUnspecifiedRegistersAreUndefined(true);

  row.SetRegisterLocationToAtCFAPlusOffset(fp_reg_num, ptr_size * -2, true);
  row.SetRegisterLocationToAtCFAPlusOffset(pc_reg_num, ptr_size * -1, true);
  row.SetRegisterLocationToIsCFAPlusOffset(sp_reg_num, 0, true);

  auto plan_sp = std::make_shared<UnwindPlan>(eRegisterKindDWARF);
  plan_sp->AppendRow(std::move(row));
  plan_sp->SetSourceName("x86_64 default unwind plan");
  plan_sp->SetSourcedFromCompiler(eLazyBoolNo);
  plan_sp->SetUnwindPlanValidAtAllInstructions(eLazyBoolNo);
  plan_sp->SetUnwindPlanForSignalTrap(eLazyBoolNo);
  return plan_sp;
}

bool ABISysV_x86_64::RegisterIsVolatile(const RegisterInfo *reg_info) {
  return !RegisterIsCalleeSaved(reg_info);
}

// See "Register Usage" in the
// "System V Application Binary Interface"
// "AMD64 Architecture Processor Supplement" (or "x86-64(tm) Architecture
// Processor Supplement" in earlier revisions) (this doc is also commonly
// referred to as the x86-64/AMD64 psABI) Edited by Michael Matz, Jan Hubicka,
// Andreas Jaeger, and Mark Mitchell current version is 0.99.6 released
// 2012-07-02 at http://refspecs.linuxfoundation.org/elf/x86-64-abi-0.99.pdf
// It's being revised & updated at https://github.com/hjl-tools/x86-psABI/

bool ABISysV_x86_64::RegisterIsCalleeSaved(const RegisterInfo *reg_info) {
  if (!reg_info)
    return false;
  assert(reg_info->name != nullptr && "unnamed register?");
  std::string Name = std::string(reg_info->name);
  bool IsCalleeSaved =
      llvm::StringSwitch<bool>(Name)
          .Cases("r12", "r13", "r14", "r15", "rbp", "ebp", "rbx", "ebx", true)
          .Cases("rip", "eip", "rsp", "esp", "sp", "fp", "pc", true)
          .Default(false);
  return IsCalleeSaved;
}

uint32_t ABISysV_x86_64::GetGenericNum(llvm::StringRef name) {
  return llvm::StringSwitch<uint32_t>(name)
      .Case("rip", LLDB_REGNUM_GENERIC_PC)
      .Case("rsp", LLDB_REGNUM_GENERIC_SP)
      .Case("rbp", LLDB_REGNUM_GENERIC_FP)
      .Case("rflags", LLDB_REGNUM_GENERIC_FLAGS)
      // gdbserver uses eflags
      .Case("eflags", LLDB_REGNUM_GENERIC_FLAGS)
      .Case("rdi", LLDB_REGNUM_GENERIC_ARG1)
      .Case("rsi", LLDB_REGNUM_GENERIC_ARG2)
      .Case("rdx", LLDB_REGNUM_GENERIC_ARG3)
      .Case("rcx", LLDB_REGNUM_GENERIC_ARG4)
      .Case("r8", LLDB_REGNUM_GENERIC_ARG5)
      .Case("r9", LLDB_REGNUM_GENERIC_ARG6)
      .Default(LLDB_INVALID_REGNUM);
}

void ABISysV_x86_64::Initialize() {
  PluginManager::RegisterPlugin(
      GetPluginNameStatic(), "System V ABI for x86_64 targets", CreateInstance);
}

void ABISysV_x86_64::Terminate() {
  PluginManager::UnregisterPlugin(CreateInstance);
}