1//===-- MachVMRegion.cpp ----------------------------------------*- C++ -*-===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8//
9// Created by Greg Clayton on 6/26/07.
10//
11//===----------------------------------------------------------------------===//
12
13#include "MachVMRegion.h"
14#include "DNBLog.h"
15#include <cassert>
16#include <mach/mach_vm.h>
17
// Construct a region object bound to "task". No VM region has been queried
// yet: all address/size members start out invalid/zero and m_data is zeroed,
// so the object is inert until GetRegionForAddress() is called.
MachVMRegion::MachVMRegion(task_t task)
    : m_task(task), m_addr(INVALID_NUB_ADDRESS), m_err(),
      m_start(INVALID_NUB_ADDRESS), m_size(0), m_depth(-1),
      m_curr_protection(0), m_protection_addr(INVALID_NUB_ADDRESS),
      m_protection_size(0) {
  // m_data is a plain C struct filled in by mach_vm_region_recurse(); zero it
  // so stale garbage is never mistaken for valid region info.
  memset(&m_data, 0, sizeof(m_data));
}
25
MachVMRegion::~MachVMRegion() {
  // Restore any original protections and clear our vars. Clear() calls
  // RestoreProtections(), so protections temporarily changed via
  // SetProtections() are put back before this object goes away.
  Clear();
}
30
31void MachVMRegion::Clear() {
32 RestoreProtections();
33 m_addr = INVALID_NUB_ADDRESS;
34 m_err.Clear();
35 m_start = INVALID_NUB_ADDRESS;
36 m_size = 0;
37 m_depth = -1;
38 memset(&m_data, 0, sizeof(m_data));
39 m_curr_protection = 0;
40 m_protection_addr = INVALID_NUB_ADDRESS;
41 m_protection_size = 0;
42}
43
// Change the protections of the pages starting at "addr" to "prot", clamping
// the request to the end of this region. On success the previous request is
// recorded (m_curr_protection / m_protection_addr / m_protection_size) so
// RestoreProtections() can undo it later.
//
// Returns true if the protections were already as requested or were
// successfully changed; false if "addr" is not inside this region, the
// clamped size is zero, or mach_vm_protect() failed (even with the
// VM_PROT_COPY fallback).
bool MachVMRegion::SetProtections(mach_vm_address_t addr, mach_vm_size_t size,
                                  vm_prot_t prot) {
  if (ContainsAddress(addr)) {
    // Clamp the size so we never touch pages past the end of this region.
    mach_vm_size_t prot_size = size;
    mach_vm_address_t end_addr = EndAddress();
    if (prot_size > (end_addr - addr))
      prot_size = end_addr - addr;

    if (prot_size > 0) {
      // Only compare the basic r/w/x bits; m_curr_protection may carry other
      // flag bits that aren't part of the caller's request.
      if (prot == (m_curr_protection & VM_PROT_ALL)) {
        DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS | LOG_VERBOSE,
                         "MachVMRegion::%s: protections (%u) already "
                         "sufficient for task 0x%4.4x at address 0x%8.8llx) ",
                         __FUNCTION__, prot, m_task, (uint64_t)addr);
        // Protections are already set as requested...
        return true;
      } else {
        // set_max = 0: change the current protection, not the maximum.
        m_err = ::mach_vm_protect(m_task, addr, prot_size, 0, prot);
        if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS))
          m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = "
                            "0x%8.8llx, size = %llu, set_max = %i, prot = %u )",
                            m_task, (uint64_t)addr, (uint64_t)prot_size, 0,
                            prot);
        if (m_err.Fail()) {
          // Try again with the ability to create a copy on write region
          m_err = ::mach_vm_protect(m_task, addr, prot_size, 0,
                                    prot | VM_PROT_COPY);
          if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS) || m_err.Fail())
            m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = "
                              "0x%8.8llx, size = %llu, set_max = %i, prot = %u "
                              ")",
                              m_task, (uint64_t)addr, (uint64_t)prot_size, 0,
                              prot | VM_PROT_COPY);
        }
        if (m_err.Success()) {
          // Remember exactly what we changed so RestoreProtections() can put
          // the original protections (m_data.protection) back.
          m_curr_protection = prot;
          m_protection_addr = addr;
          m_protection_size = prot_size;
          return true;
        }
      }
    } else {
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS | LOG_VERBOSE,
                       "%s: Zero size for task 0x%4.4x at address 0x%8.8llx) ",
                       __FUNCTION__, m_task, (uint64_t)addr);
    }
  }
  return false;
}
93
// Undo a protection change made by SetProtections(), restoring the region's
// original protections (m_data.protection as reported by the kernel).
//
// Returns true either when there was nothing to restore (protections were
// never changed, or the recorded size is zero) or when the restore succeeded;
// returns false only if mach_vm_protect() failed. On success the protection
// bookkeeping members are reset so a second call becomes a no-op.
bool MachVMRegion::RestoreProtections() {
  if (m_curr_protection != m_data.protection && m_protection_size > 0) {
    // set_max = 0: restore the current protection, not the maximum.
    m_err = ::mach_vm_protect(m_task, m_protection_addr, m_protection_size, 0,
                              m_data.protection);
    if (DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS) || m_err.Fail())
      m_err.LogThreaded("::mach_vm_protect ( task = 0x%4.4x, addr = 0x%8.8llx, "
                        "size = %llu, set_max = %i, prot = %u )",
                        m_task, (uint64_t)m_protection_addr,
                        (uint64_t)m_protection_size, 0, m_data.protection);
    if (m_err.Success()) {
      m_protection_size = 0;
      m_protection_addr = INVALID_NUB_ADDRESS;
      m_curr_protection = m_data.protection;
      return true;
    }
  } else {
    // Nothing was changed (or nothing was recorded); report success.
    m_err.Clear();
    return true;
  }

  return false;
}
116
// Query the kernel (via mach_vm_region_recurse) for the VM region containing
// "addr", caching the result in this object. Any previously modified
// protections are restored first via Clear().
//
// Returns true only if the kernel call succeeded AND "addr" actually lies
// within the returned region [m_start, m_start + m_size). See the note at the
// bottom: a successful kernel call may still describe the *next* region.
bool MachVMRegion::GetRegionForAddress(nub_addr_t addr) {
  // Restore any original protections and clear our vars
  Clear();
  m_err.Clear();
  m_addr = addr;
  // m_start/m_size/m_depth are in-out parameters to the kernel call; seed the
  // lookup address and allow deep submap recursion.
  m_start = addr;
  m_depth = 1024;
  mach_msg_type_number_t info_size = kRegionInfoSize;
  // The kernel expects a 32-bit count of natural_t elements; guard against
  // the type silently changing size.
  static_assert(sizeof(info_size) == 4);
  m_err =
      ::mach_vm_region_recurse(m_task, &m_start, &m_size, &m_depth,
                               (vm_region_recurse_info_t)&m_data, &info_size);

  const bool failed = m_err.Fail();
  const bool log_protections = DNBLogCheckLogBit(LOG_MEMORY_PROTECTIONS);

  if (log_protections || failed)
    m_err.LogThreaded("::mach_vm_region_recurse ( task = 0x%4.4x, address => "
                      "0x%8.8llx, size => %llu, nesting_depth => %d, info => "
                      "%p, infoCnt => %d) addr = 0x%8.8llx ",
                      m_task, (uint64_t)m_start, (uint64_t)m_size, m_depth,
                      &m_data, info_size, (uint64_t)addr);

  if (failed)
    return false;
  if (log_protections) {
    DNBLogThreaded("info = { prot = %u, "
                   "max_prot = %u, "
                   "inheritance = 0x%8.8x, "
                   "offset = 0x%8.8llx, "
                   "user_tag = 0x%8.8x, "
                   "ref_count = %u, "
                   "shadow_depth = %u, "
                   "ext_pager = %u, "
                   "share_mode = %u, "
                   "is_submap = %d, "
                   "behavior = %d, "
                   "object_id = 0x%8.8x, "
                   "user_wired_count = 0x%4.4x }",
                   m_data.protection, m_data.max_protection, m_data.inheritance,
                   (uint64_t)m_data.offset, m_data.user_tag, m_data.ref_count,
                   m_data.shadow_depth, m_data.external_pager,
                   m_data.share_mode, m_data.is_submap, m_data.behavior,
                   m_data.object_id, m_data.user_wired_count);
  }
  // Track the region's current protections so SetProtections() can detect
  // when a request is already satisfied.
  m_curr_protection = m_data.protection;

  // We make a request for an address and got no error back, but this
  // doesn't mean that "addr" is in the range: the kernel returns the first
  // region at or after the requested address. The data in this object is
  // still valid, so callers can see where the next region begins. In that
  // case we return false, yet leave "m_err" with a successful return code.
  return !((addr < m_start) || (addr >= (m_start + m_size)));
}
170
171uint32_t MachVMRegion::GetDNBPermissions() const {
172 if (m_addr == INVALID_NUB_ADDRESS || m_start == INVALID_NUB_ADDRESS ||
173 m_size == 0)
174 return 0;
175 uint32_t dnb_permissions = 0;
176
177 if ((m_data.protection & VM_PROT_READ) == VM_PROT_READ)
178 dnb_permissions |= eMemoryPermissionsReadable;
179 if ((m_data.protection & VM_PROT_WRITE) == VM_PROT_WRITE)
180 dnb_permissions |= eMemoryPermissionsWritable;
181 if ((m_data.protection & VM_PROT_EXECUTE) == VM_PROT_EXECUTE)
182 dnb_permissions |= eMemoryPermissionsExecutable;
183 return dnb_permissions;
184}
185
186std::vector<std::string> MachVMRegion::GetMemoryTypes() const {
187 std::vector<std::string> types;
188 if (m_data.user_tag == VM_MEMORY_STACK) {
189 if (m_data.protection == VM_PROT_NONE) {
190 types.push_back("stack-guard");
191 } else {
192 types.push_back("stack");
193 }
194 }
195 if (m_data.user_tag == VM_MEMORY_MALLOC) {
196 if (m_data.protection == VM_PROT_NONE)
197 types.push_back("malloc-guard");
198 else if (m_data.share_mode == SM_EMPTY)
199 types.push_back("malloc-reserved");
200 else
201 types.push_back("malloc-metadata");
202 }
203 if (m_data.user_tag == VM_MEMORY_MALLOC_NANO ||
204 m_data.user_tag == VM_MEMORY_MALLOC_TINY ||
205 m_data.user_tag == VM_MEMORY_MALLOC_SMALL ||
206 m_data.user_tag == VM_MEMORY_MALLOC_LARGE ||
207 m_data.user_tag == VM_MEMORY_MALLOC_LARGE_REUSED ||
208 m_data.user_tag == VM_MEMORY_MALLOC_LARGE_REUSABLE ||
209 m_data.user_tag == VM_MEMORY_MALLOC_HUGE ||
210 m_data.user_tag == VM_MEMORY_REALLOC ||
211 m_data.user_tag == VM_MEMORY_SBRK) {
212 types.push_back("heap");
213 if (m_data.user_tag == VM_MEMORY_MALLOC_TINY) {
214 types.push_back("malloc-tiny");
215 }
216 if (m_data.user_tag == VM_MEMORY_MALLOC_LARGE) {
217 types.push_back("malloc-large");
218 }
219 if (m_data.user_tag == VM_MEMORY_MALLOC_SMALL) {
220 types.push_back("malloc-small");
221 }
222 }
223 return types;
224}
225

source code of lldb/tools/debugserver/source/MacOSX/MachVMRegion.cpp