//===-- lsan_common_mac.cpp ----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file is a part of LeakSanitizer.
// Implementation of common leak checking functionality. Darwin-specific code.
//
//===----------------------------------------------------------------------===//

#include "sanitizer_common/sanitizer_platform.h"
#include "sanitizer_common/sanitizer_libc.h"
#include "lsan_common.h"

#if CAN_SANITIZE_LEAKS && SANITIZER_APPLE

#  include <mach/mach.h>
#  include <mach/vm_statistics.h>
#  include <pthread.h>

#  include "lsan_allocator.h"
#  include "sanitizer_common/sanitizer_allocator_internal.h"
namespace __lsan {

class ThreadContextLsanBase;

enum class SeenRegion {
  None = 0,
  AllocOnce = 1 << 0,
  LibDispatch = 1 << 1,
  Foundation = 1 << 2,
  All = AllocOnce | LibDispatch | Foundation
};

inline SeenRegion operator|(SeenRegion left, SeenRegion right) {
  return static_cast<SeenRegion>(static_cast<int>(left) |
                                 static_cast<int>(right));
}

inline SeenRegion &operator|=(SeenRegion &left, const SeenRegion &right) {
  left = left | right;
  return left;
}

struct RegionScanState {
  SeenRegion seen_regions = SeenRegion::None;
  bool in_libdispatch = false;
};
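
// Illustrative only (not used by the runtime): how the bitmask above is
// combined and tested, mirroring the scan loop in
// ProcessPlatformSpecificAllocations() below.
//   RegionScanState state;
//   state.seen_regions |= SeenRegion::AllocOnce;
//   state.seen_regions |= SeenRegion::LibDispatch | SeenRegion::Foundation;
//   if (state.seen_regions == SeenRegion::All) {
//     // every interesting region type has been visited
//   }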

typedef struct {
  int disable_counter;
  ThreadContextLsanBase *current_thread;
  AllocatorCache cache;
} thread_local_data_t;

static pthread_key_t key;
static pthread_once_t key_once = PTHREAD_ONCE_INIT;

// The main thread's destructor still needs the current thread's data, so we
// can't destroy it until current_thread has been used and reset; instead,
// re-register the value so it survives another destructor pass.
void restore_tid_data(void *ptr) {
  thread_local_data_t *data = (thread_local_data_t *)ptr;
  if (data->current_thread)
    pthread_setspecific(key, data);
}
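
// Note on the destructor protocol relied on above: at thread exit, pthreads
// clears the slot before invoking the destructor with the old value. Because
// restore_tid_data() re-registers the value while current_thread is still
// set, the destructor runs again on a later pass (implementations retry up to
// PTHREAD_DESTRUCTOR_ITERATIONS times), keeping the data alive until
// current_thread has been reset.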

static void make_tls_key() {
  CHECK_EQ(pthread_key_create(&key, restore_tid_data), 0);
}

static thread_local_data_t *get_tls_val(bool alloc) {
  pthread_once(&key_once, make_tls_key);

  thread_local_data_t *ptr = (thread_local_data_t *)pthread_getspecific(key);
  if (ptr == nullptr && alloc) {
    ptr = (thread_local_data_t *)InternalAlloc(sizeof(*ptr));
    ptr->disable_counter = 0;
    ptr->current_thread = nullptr;
    ptr->cache = AllocatorCache();
    pthread_setspecific(key, ptr);
  }

  return ptr;
}

bool DisabledInThisThread() {
  thread_local_data_t *data = get_tls_val(false);
  return data ? data->disable_counter > 0 : false;
}

void DisableInThisThread() { ++get_tls_val(true)->disable_counter; }

void EnableInThisThread() {
  int *disable_counter = &get_tls_val(true)->disable_counter;
  if (*disable_counter == 0) {
    DisableCounterUnderflow();
  }
  --*disable_counter;
}
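
// For context, these counters back the public __lsan_disable()/__lsan_enable()
// interface (declared in <sanitizer/lsan_interface.h>), and the calls nest:
//   __lsan_disable();  // disable_counter -> 1; allocations are ignored
//   __lsan_disable();  // disable_counter -> 2
//   __lsan_enable();   // disable_counter -> 1
//   __lsan_enable();   // disable_counter -> 0; leak checking active again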

ThreadContextLsanBase *GetCurrentThread() {
  thread_local_data_t *data = get_tls_val(false);
  return data ? data->current_thread : nullptr;
}

void SetCurrentThread(ThreadContextLsanBase *tctx) {
  get_tls_val(true)->current_thread = tctx;
}

AllocatorCache *GetAllocatorCache() { return &get_tls_val(true)->cache; }

LoadedModule *GetLinker() { return nullptr; }

// Required on Linux for initialization of TLS behavior, but should not be
// required on Darwin.
void InitializePlatformSpecificModules() {}

// Sections which can't contain global pointers. This list errs on the
// side of caution to avoid false positives, at the expense of performance.
//
// Other potentially safe sections include:
// __all_image_info, __crash_info, __const, __got, __interpose, __objc_msg_break
//
// Sections which definitely cannot be included here are:
// __objc_data, __objc_const, __data, __bss, __common, __thread_data,
// __thread_bss, __thread_vars, __objc_opt_rw, __objc_opt_ptrs
static const char *kSkippedSecNames[] = {
    "__cfstring",       "__la_symbol_ptr",  "__mod_init_func",
    "__mod_term_func",  "__nl_symbol_ptr",  "__objc_classlist",
    "__objc_classrefs", "__objc_imageinfo", "__objc_nlclslist",
    "__objc_protolist", "__objc_selrefs",   "__objc_superrefs"};

// Scans global variables for heap pointers.
void ProcessGlobalRegions(Frontier *frontier) {
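  // Mach-O section names are at most kMaxSegName characters; verify that every
  // entry in the skip list fits, since a longer name could never match in the
  // comparison below and the skip would be silently lost.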
  for (auto name : kSkippedSecNames)
    CHECK(internal_strnlen(name, kMaxSegName + 1) <= kMaxSegName);

  MemoryMappingLayout memory_mapping(false);
  InternalMmapVector<LoadedModule> modules;
  modules.reserve(128);
  memory_mapping.DumpListOfModules(&modules);
  for (uptr i = 0; i < modules.size(); ++i) {
    // Even when global scanning is disabled, we still need to scan
    // system libraries for stashed pointers.
    if (!flags()->use_globals && modules[i].instrumented()) continue;

    for (const __sanitizer::LoadedModule::AddressRange &range :
         modules[i].ranges()) {
      // Sections storing global variables are writable and non-executable.
      if (range.executable || !range.writable) continue;

      // Skip sections known not to contain global pointers.
      bool is_skipped = false;
      for (auto name : kSkippedSecNames)
        is_skipped |= !internal_strcmp(range.name, name);
      if (is_skipped) continue;

      ScanGlobalRange(range.beg, range.end, frontier);
    }
  }
}

void ProcessPlatformSpecificAllocations(Frontier *frontier) {
  vm_address_t address = 0;
  kern_return_t err = KERN_SUCCESS;

  InternalMmapVector<Region> mapped_regions;
  bool use_root_regions = flags()->use_root_regions && HasRootRegions();

  RegionScanState scan_state;
  while (err == KERN_SUCCESS) {
    vm_size_t size = 0;
    unsigned depth = 1;
    struct vm_region_submap_info_64 info;
    mach_msg_type_number_t count = VM_REGION_SUBMAP_INFO_COUNT_64;
    err = vm_region_recurse_64(mach_task_self(), &address, &size, &depth,
                               (vm_region_info_t)&info, &count);
    // On failure, `info` and `size` hold no meaningful data; stop iterating
    // rather than acting on stale values below.
    if (err != KERN_SUCCESS) break;

    uptr end_address = address + size;
    if (info.user_tag == VM_MEMORY_OS_ALLOC_ONCE) {
      // libxpc stashes some pointers in the Kernel Alloc Once page;
      // make sure not to report those as leaks.
      scan_state.seen_regions |= SeenRegion::AllocOnce;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (info.user_tag == VM_MEMORY_FOUNDATION) {
      // Objective-C block trampolines use the Foundation region.
      scan_state.seen_regions |= SeenRegion::Foundation;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (info.user_tag == VM_MEMORY_LIBDISPATCH) {
      // Dispatch continuations use the libdispatch region. Empirically, there
      // can be more than one region with this tag, so we'll optimistically
      // assume that they're contiguous. Otherwise, we would need to scan every
      // region to ensure we find them all.
      scan_state.in_libdispatch = true;
      ScanRangeForPointers(address, end_address, frontier, "GLOBAL",
                           kReachable);
    } else if (scan_state.in_libdispatch) {
      scan_state.seen_regions |= SeenRegion::LibDispatch;
      scan_state.in_libdispatch = false;
    }

    // Recursing over the full memory map is very slow; break out
    // early if we don't need the full iteration.
    if (scan_state.seen_regions == SeenRegion::All && !use_root_regions) {
      break;
    }

    // This additional root region scan is required on Darwin in order to
    // detect root regions contained within mmap'd memory regions, because
    // the Darwin implementation of sanitizer_procmaps traverses images
    // as loaded by dyld, and not the complete set of all memory regions.
    //
    // TODO(fjricci) - remove this once sanitizer_procmaps_mac has the same
    // behavior as sanitizer_procmaps_linux and traverses all memory regions.
    if (use_root_regions && (info.protection & kProtectionRead))
      mapped_regions.push_back({address, end_address});

    address = end_address;
  }
  ScanRootRegions(frontier, mapped_regions);
}
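
// For context: root regions are registered by client code through the public
// interface declared in <sanitizer/lsan_interface.h>, e.g.
//   __lsan_register_root_region(ptr, size);
// and are only collected by the loop above when flags()->use_root_regions is
// set and at least one region has been registered.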

// On Darwin, we can intercept _exit gracefully and return a failing exit code
// if required at that point. Calling Die() here is undefined behavior and
// causes rare race conditions.
void HandleLeaks() {}
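
// For reference, the _exit interception mentioned above lives in
// lsan_interceptors.cpp; roughly (a sketch, not the authoritative source):
//   INTERCEPTOR(void, _exit, int status) {
//     if (status == 0 && HasReportedLeaks()) status = common_flags()->exitcode;
//     REAL(_exit)(status);
//   }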

void LockStuffAndStopTheWorld(StopTheWorldCallback callback,
                              CheckForLeaksParam *argument) {
  ScopedStopTheWorldLock lock;
  StopTheWorld(callback, argument);
}

}  // namespace __lsan

#endif  // CAN_SANITIZE_LEAKS && SANITIZER_APPLE