1 | //===-- MachVMMemory.cpp ----------------------------------------*- C++ -*-===// |
2 | // |
3 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
4 | // See https://llvm.org/LICENSE.txt for license information. |
5 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
6 | // |
7 | //===----------------------------------------------------------------------===// |
8 | // |
9 | // Created by Greg Clayton on 6/26/07. |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "MachVMMemory.h" |
14 | #include "DNBLog.h" |
15 | #include "MachVMRegion.h" |
16 | #include <dlfcn.h> |
17 | #include <mach/mach_vm.h> |
18 | #include <mach/shared_region.h> |
19 | #include <sys/sysctl.h> |
20 | |
21 | #if defined(WITH_FBS) || defined(WITH_BKS) |
22 | extern "C" { |
23 | #import <System/sys/kern_memorystatus.h> |
24 | } |
25 | #endif |
26 | |
27 | static const vm_size_t kInvalidPageSize = ~0; |
28 | |
29 | MachVMMemory::MachVMMemory() : m_page_size(kInvalidPageSize), m_err(0) {} |
30 | |
31 | MachVMMemory::~MachVMMemory() = default; |
32 | |
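// Return (and cache) the page size for the task. When TASK_VM_INFO is
// available we prefer the task's own page size, since it can differ from
// debugserver's host page size (e.g. a 4KiB-page process running on a
// 16KiB-page system); host_page_size() is only used as a fallback.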
33 | nub_size_t MachVMMemory::PageSize(task_t task) { |
34 | if (m_page_size == kInvalidPageSize) { |
35 | #if defined(TASK_VM_INFO) && TASK_VM_INFO >= 22 |
36 | if (task != TASK_NULL) { |
37 | kern_return_t kr; |
38 | mach_msg_type_number_t info_count = TASK_VM_INFO_COUNT; |
39 | task_vm_info_data_t vm_info; |
40 | kr = task_info(task, TASK_VM_INFO, (task_info_t)&vm_info, &info_count); |
41 | if (kr == KERN_SUCCESS) { |
        DNBLogThreadedIf(
            LOG_TASK,
            "MachVMMemory::PageSize task_info returned page size of 0x%x",
            (int)vm_info.page_size);
46 | m_page_size = vm_info.page_size; |
47 | return m_page_size; |
48 | } else { |
        DNBLogThreadedIf(LOG_TASK, "MachVMMemory::PageSize task_info call "
                                   "failed to get page size, TASK_VM_INFO %d, "
                                   "TASK_VM_INFO_COUNT %d, kern return %d",
                         TASK_VM_INFO, TASK_VM_INFO_COUNT, kr);
53 | } |
54 | } |
55 | #endif |
56 | m_err = ::host_page_size(::mach_host_self(), &m_page_size); |
57 | if (m_err.Fail()) |
58 | m_page_size = 0; |
59 | } |
60 | return m_page_size; |
61 | } |
62 | |
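// Clamp "count" so that a transfer starting at "addr" does not cross a page
// boundary in the task. Read() and Write() below work page-by-page so that a
// single unmapped or protected page only truncates a transfer rather than
// failing the entire request.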
63 | nub_size_t MachVMMemory::MaxBytesLeftInPage(task_t task, nub_addr_t addr, |
64 | nub_size_t count) { |
65 | const nub_size_t page_size = PageSize(task); |
66 | if (page_size > 0) { |
67 | nub_size_t page_offset = (addr % page_size); |
68 | nub_size_t bytes_left_in_page = page_size - page_offset; |
69 | if (count > bytes_left_in_page) |
70 | count = bytes_left_in_page; |
71 | } |
72 | return count; |
73 | } |
74 | |
#define MAX_STACK_ALLOC_DISPOSITIONS                                          \
  (16 * 1024 / sizeof(int)) // 16KB worth of dispositions on the stack
77 | |
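// Return the addresses of all dirty pages in [addr, addr + size). Page
// dispositions are fetched with mach_vm_page_range_query() in chunks of at
// most MAX_STACK_ALLOC_DISPOSITIONS pages, since the disposition buffer lives
// on the stack. If a query fails, the dirty pages found so far are returned.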
78 | std::vector<nub_addr_t> get_dirty_pages(task_t task, mach_vm_address_t addr, |
79 | mach_vm_size_t size) { |
80 | std::vector<nub_addr_t> dirty_pages; |
81 | |
82 | int pages_to_query = size / vm_page_size; |
83 | // Don't try to fetch too many pages' dispositions in a single call or we |
84 | // could blow our stack out. |
85 | mach_vm_size_t dispositions_size = |
86 | std::min(pages_to_query, (int)MAX_STACK_ALLOC_DISPOSITIONS); |
87 | int dispositions[dispositions_size]; |
88 | |
89 | mach_vm_size_t chunk_count = |
90 | ((pages_to_query + MAX_STACK_ALLOC_DISPOSITIONS - 1) / |
91 | MAX_STACK_ALLOC_DISPOSITIONS); |
92 | |
93 | for (mach_vm_size_t cur_disposition_chunk = 0; |
94 | cur_disposition_chunk < chunk_count; cur_disposition_chunk++) { |
95 | mach_vm_size_t dispositions_already_queried = |
96 | cur_disposition_chunk * MAX_STACK_ALLOC_DISPOSITIONS; |
97 | |
98 | mach_vm_size_t chunk_pages_to_query = std::min( |
99 | pages_to_query - dispositions_already_queried, dispositions_size); |
100 | mach_vm_address_t chunk_page_aligned_start_addr = |
101 | addr + (dispositions_already_queried * vm_page_size); |
102 | |
103 | kern_return_t kr = mach_vm_page_range_query( |
104 | task, chunk_page_aligned_start_addr, |
105 | chunk_pages_to_query * vm_page_size, (mach_vm_address_t)dispositions, |
106 | &chunk_pages_to_query); |
107 | if (kr != KERN_SUCCESS) |
108 | return dirty_pages; |
109 | for (mach_vm_size_t i = 0; i < chunk_pages_to_query; i++) { |
110 | uint64_t dirty_addr = chunk_page_aligned_start_addr + (i * vm_page_size); |
111 | if (dispositions[i] & VM_PAGE_QUERY_PAGE_DIRTY) |
112 | dirty_pages.push_back(dirty_addr); |
113 | } |
114 | } |
115 | return dirty_pages; |
116 | } |
117 | |
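// Fill in "region_info" for the region containing "address". If "address" is
// not in any valid region, report a zero-permission region that extends to
// the next valid mapping, or to the end of the address space if there is
// none, so the caller can skip the entire hole at once.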
118 | nub_bool_t MachVMMemory::GetMemoryRegionInfo(task_t task, nub_addr_t address, |
119 | DNBRegionInfo *region_info) { |
120 | MachVMRegion vmRegion(task); |
121 | |
122 | if (vmRegion.GetRegionForAddress(address)) { |
123 | region_info->addr = vmRegion.StartAddress(); |
124 | region_info->size = vmRegion.GetByteSize(); |
125 | region_info->permissions = vmRegion.GetDNBPermissions(); |
126 | region_info->dirty_pages = |
127 | get_dirty_pages(task, vmRegion.StartAddress(), vmRegion.GetByteSize()); |
128 | region_info->vm_types = vmRegion.GetMemoryTypes(); |
129 | } else { |
130 | region_info->addr = address; |
131 | region_info->size = 0; |
132 | if (vmRegion.GetError().Success()) { |
      // vmRegion.GetRegionForAddress() returned false, indicating that
      // "address" wasn't in a valid region, but the "vmRegion" info was
      // successfully read from the task, which means it describes the next
      // valid region, from which we can infer the size of this invalid region.
137 | mach_vm_address_t start_addr = vmRegion.StartAddress(); |
138 | if (address < start_addr) |
139 | region_info->size = start_addr - address; |
140 | } |
141 | // If we can't get any info about the size from the next region it means |
142 | // we asked about an address that was past all mappings, so the size |
143 | // of this region will take up all remaining address space. |
144 | if (region_info->size == 0) |
145 | region_info->size = INVALID_NUB_ADDRESS - region_info->addr; |
146 | |
147 | // Not readable, writeable or executable |
148 | region_info->permissions = 0; |
149 | } |
150 | return true; |
151 | } |
152 | |
153 | static uint64_t GetPhysicalMemory() { |
154 | // This doesn't change often at all. No need to poll each time. |
155 | static uint64_t physical_memory = 0; |
156 | static bool calculated = false; |
157 | if (calculated) |
158 | return physical_memory; |
159 | |
160 | size_t len = sizeof(physical_memory); |
161 | sysctlbyname("hw.memsize" , &physical_memory, &len, NULL, 0); |
162 | |
163 | calculated = true; |
164 | return physical_memory; |
165 | } |
166 | |
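// Gather the memory statistics requested by "scanType": the host's physical
// memory, host VM statistics, the task's anonymous memory and physical
// footprint, and, when built with FBS/BKS support, the process's
// memorystatus memory limit.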
nub_bool_t MachVMMemory::GetMemoryProfile(
    DNBProfileDataScanType scanType, task_t task, struct task_basic_info ti,
    cpu_type_t cputype, nub_process_t pid, vm_statistics64_data_t &vminfo,
    uint64_t &physical_memory, uint64_t &anonymous, uint64_t &phys_footprint,
    uint64_t &memory_cap) {
173 | if (scanType & eProfileHostMemory) |
174 | physical_memory = GetPhysicalMemory(); |
175 | |
176 | if (scanType & eProfileMemory) { |
177 | static mach_port_t localHost = mach_host_self(); |
178 | mach_msg_type_number_t count = HOST_VM_INFO64_COUNT; |
179 | host_statistics64(localHost, HOST_VM_INFO64, (host_info64_t)&vminfo, |
180 | &count); |
181 | |
182 | kern_return_t kr; |
183 | mach_msg_type_number_t info_count; |
184 | task_vm_info_data_t vm_info; |
185 | |
186 | info_count = TASK_VM_INFO_COUNT; |
    kr = task_info(task, TASK_VM_INFO_PURGEABLE, (task_info_t)&vm_info,
                   &info_count);
188 | if (kr == KERN_SUCCESS) { |
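      // Approximate the task's anonymous memory as its internal
      // (non-file-backed) pages plus compressed pages, minus purgeable
      // volatile memory that the system could reclaim at any time.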
189 | if (scanType & eProfileMemoryAnonymous) { |
        anonymous = vm_info.internal + vm_info.compressed -
                    vm_info.purgeable_volatile_pmap;
191 | } |
192 | |
193 | phys_footprint = vm_info.phys_footprint; |
194 | } |
195 | } |
196 | |
197 | #if defined(WITH_FBS) || defined(WITH_BKS) |
198 | if (scanType & eProfileMemoryCap) { |
199 | memorystatus_memlimit_properties_t memlimit_properties; |
200 | memset(&memlimit_properties, 0, sizeof(memlimit_properties)); |
    if (memorystatus_control(MEMORYSTATUS_CMD_GET_MEMLIMIT_PROPERTIES, pid, 0,
                             &memlimit_properties,
                             sizeof(memlimit_properties)) == 0) {
202 | memory_cap = memlimit_properties.memlimit_active; |
203 | } |
204 | } |
205 | #endif |
206 | |
207 | return true; |
208 | } |
209 | |
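// Read up to "data_count" bytes at "address" in the task into "data", one
// page at most per mach_vm_read() call. Returns the number of bytes actually
// read; the loop stops at the first page that cannot be read, so a short
// count indicates where readable memory ends.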
210 | nub_size_t MachVMMemory::Read(task_t task, nub_addr_t address, void *data, |
211 | nub_size_t data_count) { |
212 | if (data == NULL || data_count == 0) |
213 | return 0; |
214 | |
215 | nub_size_t total_bytes_read = 0; |
216 | nub_addr_t curr_addr = address; |
217 | uint8_t *curr_data = (uint8_t *)data; |
218 | while (total_bytes_read < data_count) { |
219 | mach_vm_size_t curr_size = |
220 | MaxBytesLeftInPage(task, curr_addr, data_count - total_bytes_read); |
221 | mach_msg_type_number_t curr_bytes_read = 0; |
222 | vm_offset_t vm_memory = 0; |
223 | m_err = ::mach_vm_read(task, curr_addr, curr_size, &vm_memory, |
224 | &curr_bytes_read); |
225 | |
226 | if (DNBLogCheckLogBit(LOG_MEMORY)) |
227 | m_err.LogThreaded("::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, " |
228 | "size = %llu, data => %8.8p, dataCnt => %i )" , |
229 | task, (uint64_t)curr_addr, (uint64_t)curr_size, |
230 | vm_memory, curr_bytes_read); |
231 | |
232 | if (m_err.Success()) { |
233 | if (curr_bytes_read != curr_size) { |
234 | if (DNBLogCheckLogBit(LOG_MEMORY)) |
          m_err.LogThreaded(
              "::mach_vm_read ( task = 0x%4.4x, addr = 0x%8.8llx, size = "
              "%llu, data => %8.8p, dataCnt=>%i ) only read %u of %llu bytes",
              task, (uint64_t)curr_addr, (uint64_t)curr_size, vm_memory,
              curr_bytes_read, curr_bytes_read, (uint64_t)curr_size);
240 | } |
241 | ::memcpy(curr_data, (void *)vm_memory, curr_bytes_read); |
242 | ::vm_deallocate(mach_task_self(), vm_memory, curr_bytes_read); |
243 | total_bytes_read += curr_bytes_read; |
244 | curr_addr += curr_bytes_read; |
245 | curr_data += curr_bytes_read; |
246 | } else { |
247 | break; |
248 | } |
249 | } |
250 | return total_bytes_read; |
251 | } |
252 | |
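// Write "data_count" bytes to "address" in the task. Each chunk is clamped
// to its containing VM region, and the region is temporarily given
// read/write protections for the write; the MachVMRegion object restores the
// original protections when it goes out of scope.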
253 | nub_size_t MachVMMemory::Write(task_t task, nub_addr_t address, |
254 | const void *data, nub_size_t data_count) { |
255 | MachVMRegion vmRegion(task); |
256 | |
257 | nub_size_t total_bytes_written = 0; |
258 | nub_addr_t curr_addr = address; |
259 | const uint8_t *curr_data = (const uint8_t *)data; |
260 | |
261 | while (total_bytes_written < data_count) { |
262 | if (vmRegion.GetRegionForAddress(curr_addr)) { |
263 | mach_vm_size_t curr_data_count = data_count - total_bytes_written; |
264 | mach_vm_size_t region_bytes_left = vmRegion.BytesRemaining(curr_addr); |
265 | if (region_bytes_left == 0) { |
266 | break; |
267 | } |
268 | if (curr_data_count > region_bytes_left) |
269 | curr_data_count = region_bytes_left; |
270 | |
271 | if (vmRegion.SetProtections(curr_addr, curr_data_count, |
272 | VM_PROT_READ | VM_PROT_WRITE)) { |
273 | nub_size_t bytes_written = |
274 | WriteRegion(task, curr_addr, curr_data, curr_data_count); |
        if (bytes_written == 0) {
          // The error should already have been posted by WriteRegion...
277 | break; |
278 | } else { |
279 | total_bytes_written += bytes_written; |
280 | curr_addr += bytes_written; |
281 | curr_data += bytes_written; |
282 | } |
283 | } else { |
        DNBLogThreadedIf(
            LOG_MEMORY_PROTECTIONS, "Failed to set read/write protections on "
                                    "region for address: [0x%8.8llx-0x%8.8llx)",
            (uint64_t)curr_addr, (uint64_t)(curr_addr + curr_data_count));
288 | break; |
289 | } |
290 | } else { |
      DNBLogThreadedIf(LOG_MEMORY_PROTECTIONS,
                       "Failed to get region for address: 0x%8.8llx",
                       (uint64_t)address);
294 | break; |
295 | } |
296 | } |
297 | |
298 | return total_bytes_written; |
299 | } |
300 | |
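// Write "data_count" bytes into a single region that Write() has already
// made writable, at most one page per mach_vm_write() call. Returns the
// number of bytes written.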
301 | nub_size_t MachVMMemory::WriteRegion(task_t task, const nub_addr_t address, |
302 | const void *data, |
303 | const nub_size_t data_count) { |
304 | if (data == NULL || data_count == 0) |
305 | return 0; |
306 | |
307 | nub_size_t total_bytes_written = 0; |
308 | nub_addr_t curr_addr = address; |
309 | const uint8_t *curr_data = (const uint8_t *)data; |
310 | while (total_bytes_written < data_count) { |
311 | mach_msg_type_number_t curr_data_count = |
312 | static_cast<mach_msg_type_number_t>(MaxBytesLeftInPage( |
313 | task, curr_addr, data_count - total_bytes_written)); |
314 | m_err = |
315 | ::mach_vm_write(task, curr_addr, (pointer_t)curr_data, curr_data_count); |
316 | if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail()) |
317 | m_err.LogThreaded("::mach_vm_write ( task = 0x%4.4x, addr = 0x%8.8llx, " |
318 | "data = %8.8p, dataCnt = %u )" , |
319 | task, (uint64_t)curr_addr, curr_data, curr_data_count); |
320 | |
321 | #if !defined(__i386__) && !defined(__x86_64__) |
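    // On architectures without coherent instruction caches (e.g. arm64),
    // flush the caches for the written range so stale instructions aren't
    // executed, which matters when inserting or removing breakpoint traps.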
322 | vm_machine_attribute_val_t mattr_value = MATTR_VAL_CACHE_FLUSH; |
323 | |
324 | m_err = ::vm_machine_attribute(task, curr_addr, curr_data_count, |
325 | MATTR_CACHE, &mattr_value); |
326 | if (DNBLogCheckLogBit(LOG_MEMORY) || m_err.Fail()) |
327 | m_err.LogThreaded("::vm_machine_attribute ( task = 0x%4.4x, addr = " |
328 | "0x%8.8llx, size = %u, attr = MATTR_CACHE, mattr_value " |
329 | "=> MATTR_VAL_CACHE_FLUSH )" , |
330 | task, (uint64_t)curr_addr, curr_data_count); |
331 | #endif |
332 | |
333 | if (m_err.Success()) { |
334 | total_bytes_written += curr_data_count; |
335 | curr_addr += curr_data_count; |
336 | curr_data += curr_data_count; |
337 | } else { |
338 | break; |
339 | } |
340 | } |
341 | return total_bytes_written; |
342 | } |
343 | |