1 | /* |
2 | * Copyright (C) 2009, 2012 Apple Inc. All rights reserved. |
3 | * |
4 | * Redistribution and use in source and binary forms, with or without |
5 | * modification, are permitted provided that the following conditions |
6 | * are met: |
7 | * 1. Redistributions of source code must retain the above copyright |
8 | * notice, this list of conditions and the following disclaimer. |
9 | * 2. Redistributions in binary form must reproduce the above copyright |
10 | * notice, this list of conditions and the following disclaimer in the |
11 | * documentation and/or other materials provided with the distribution. |
12 | * |
13 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
14 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
15 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
16 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
17 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
18 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
19 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
20 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
21 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
22 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
23 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
24 | */ |
25 | |
26 | #ifndef MacroAssemblerCodeRef_h |
27 | #define MacroAssemblerCodeRef_h |
28 | |
29 | #include "Disassembler.h" |
30 | #include <wtf/Platform.h> |
31 | #include "ExecutableAllocator.h" |
32 | #include "LLIntData.h" |
33 | #include <wtf/DataLog.h> |
34 | #include <wtf/PassRefPtr.h> |
35 | #include <wtf/RefPtr.h> |
36 | #include <wtf/UnusedParam.h> |
37 | #include <qglobal.h> |
38 | |
39 | // ASSERT_VALID_CODE_POINTER checks that ptr is a non-null pointer, and that it is a valid |
40 | // instruction address on the platform (for example, check any alignment requirements). |
41 | #if CPU(ARM_THUMB2) |
// ARM/thumb instructions must be 16-bit aligned, but all code pointers to be loaded
// into the processor are decorated with the bottom bit set, indicating that this is
// thumb code (as opposed to 32-bit traditional ARM). The first test checks for both
// decorated and undecorated null, and the second test ensures that the pointer is
// decorated.
#define ASSERT_VALID_CODE_POINTER(ptr) \
    ASSERT(reinterpret_cast<intptr_t>(ptr) & ~1); \
    ASSERT(reinterpret_cast<intptr_t>(ptr) & 1)
49 | #define ASSERT_VALID_CODE_OFFSET(offset) \ |
50 | ASSERT(!(offset & 1)) // Must be multiple of 2. |
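// For example (illustrative values only, not part of the interface):
//     ASSERT_VALID_CODE_POINTER(reinterpret_cast<void*>(0x1001)); // passes: non-null and decorated
//     ASSERT_VALID_CODE_POINTER(reinterpret_cast<void*>(0x1000)); // would fail: bottom bit clear
//     ASSERT_VALID_CODE_OFFSET(4);                                // passes: multiple of 2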
51 | #else |
52 | #define ASSERT_VALID_CODE_POINTER(ptr) \ |
53 | ASSERT(ptr) |
54 | #define ASSERT_VALID_CODE_OFFSET(offset) // Anything goes! |
55 | #endif |
56 | |
57 | #if CPU(X86) && OS(WINDOWS) |
58 | #define CALLING_CONVENTION_IS_STDCALL 1 |
59 | #ifndef CDECL |
60 | #if COMPILER(MSVC) |
61 | #define CDECL __cdecl |
62 | #else |
63 | #define CDECL __attribute__ ((__cdecl)) |
64 | #endif // COMPILER(MSVC) |
65 | #endif // CDECL |
66 | #else |
67 | #define CALLING_CONVENTION_IS_STDCALL 0 |
68 | #endif |
69 | |
70 | #if CPU(X86) && !OS(INTEGRITY) |
71 | #define HAS_FASTCALL_CALLING_CONVENTION 1 |
72 | #ifndef FASTCALL |
73 | #if COMPILER(MSVC) |
74 | #define FASTCALL __fastcall |
75 | #else |
76 | #define FASTCALL __attribute__ ((fastcall)) |
77 | #endif // COMPILER(MSVC) |
78 | #endif // FASTCALL |
79 | #else |
80 | #define HAS_FASTCALL_CALLING_CONVENTION 0 |
#endif // CPU(X86) && !OS(INTEGRITY)
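
// Illustrative use of the macros above ('hypotheticalThunk' is a made-up name,
// not something this header defines): on x86 a fastcall entry point could be
// declared as
//     void* FASTCALL hypotheticalThunk(void* arg);
// so that its address matches the FASTCALL FunctionPtr overloads below.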
82 | |
83 | namespace JSC { |
84 | |
85 | // FunctionPtr: |
86 | // |
87 | // FunctionPtr should be used to wrap pointers to C/C++ functions in JSC |
88 | // (particularly, the stub functions). |
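//
// A minimal usage sketch ('hypotheticalStub' is a stand-in for any C function
// with a matching signature, not something this header defines):
//
//     extern "C" void hypotheticalStub(void*);
//     FunctionPtr stub(hypotheticalStub);
//     void* raw = stub.value(); // raw address, suitable for emitting a call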
89 | class FunctionPtr { |
90 | public: |
91 | FunctionPtr() |
92 | : m_value(0) |
93 | { |
94 | } |
95 | |
96 | template<typename returnType> |
97 | FunctionPtr(returnType(*value)()) |
98 | : m_value((void*)value) |
99 | { |
100 | ASSERT_VALID_CODE_POINTER(m_value); |
101 | } |
102 | |
103 | template<typename returnType, typename argType1> |
104 | FunctionPtr(returnType(*value)(argType1)) |
105 | : m_value((void*)value) |
106 | { |
107 | ASSERT_VALID_CODE_POINTER(m_value); |
108 | } |
109 | |
110 | template<typename returnType, typename argType1, typename argType2> |
111 | FunctionPtr(returnType(*value)(argType1, argType2)) |
112 | : m_value((void*)value) |
113 | { |
114 | ASSERT_VALID_CODE_POINTER(m_value); |
115 | } |
116 | |
117 | template<typename returnType, typename argType1, typename argType2, typename argType3> |
118 | FunctionPtr(returnType(*value)(argType1, argType2, argType3)) |
119 | : m_value((void*)value) |
120 | { |
121 | ASSERT_VALID_CODE_POINTER(m_value); |
122 | } |
123 | |
124 | template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4> |
125 | FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4)) |
126 | : m_value((void*)value) |
127 | { |
128 | ASSERT_VALID_CODE_POINTER(m_value); |
129 | } |
130 | |
131 | template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5> |
132 | FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5)) |
133 | : m_value((void*)value) |
134 | { |
135 | ASSERT_VALID_CODE_POINTER(m_value); |
136 | } |
137 | |
138 | template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5, typename argType6> |
139 | FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5, argType6)) |
140 | : m_value((void*)value) |
141 | { |
142 | ASSERT_VALID_CODE_POINTER(m_value); |
143 | } |
144 | |
145 | inline FunctionPtr(MacroAssemblerCodePtr ptr); |
146 | |
// MSVC doesn't seem to treat functions with different calling conventions as
// different types; these methods are already defined for fastcall, below.
149 | #if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS) |
150 | |
151 | template<typename returnType> |
152 | FunctionPtr(returnType (CDECL *value)()) |
153 | : m_value((void*)value) |
154 | { |
155 | ASSERT_VALID_CODE_POINTER(m_value); |
156 | } |
157 | |
158 | template<typename returnType, typename argType1> |
159 | FunctionPtr(returnType (CDECL *value)(argType1)) |
160 | : m_value((void*)value) |
161 | { |
162 | ASSERT_VALID_CODE_POINTER(m_value); |
163 | } |
164 | |
165 | template<typename returnType, typename argType1, typename argType2> |
166 | FunctionPtr(returnType (CDECL *value)(argType1, argType2)) |
167 | : m_value((void*)value) |
168 | { |
169 | ASSERT_VALID_CODE_POINTER(m_value); |
170 | } |
171 | |
172 | template<typename returnType, typename argType1, typename argType2, typename argType3> |
173 | FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3)) |
174 | : m_value((void*)value) |
175 | { |
176 | ASSERT_VALID_CODE_POINTER(m_value); |
177 | } |
178 | |
179 | template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4> |
180 | FunctionPtr(returnType (CDECL *value)(argType1, argType2, argType3, argType4)) |
181 | : m_value((void*)value) |
182 | { |
183 | ASSERT_VALID_CODE_POINTER(m_value); |
184 | } |
185 | #endif |
186 | |
187 | #if HAS_FASTCALL_CALLING_CONVENTION |
188 | |
189 | template<typename returnType> |
190 | FunctionPtr(returnType (FASTCALL *value)()) |
191 | : m_value((void*)value) |
192 | { |
193 | ASSERT_VALID_CODE_POINTER(m_value); |
194 | } |
195 | |
196 | template<typename returnType, typename argType1> |
197 | FunctionPtr(returnType (FASTCALL *value)(argType1)) |
198 | : m_value((void*)value) |
199 | { |
200 | ASSERT_VALID_CODE_POINTER(m_value); |
201 | } |
202 | |
203 | template<typename returnType, typename argType1, typename argType2> |
204 | FunctionPtr(returnType (FASTCALL *value)(argType1, argType2)) |
205 | : m_value((void*)value) |
206 | { |
207 | ASSERT_VALID_CODE_POINTER(m_value); |
208 | } |
209 | |
210 | template<typename returnType, typename argType1, typename argType2, typename argType3> |
211 | FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3)) |
212 | : m_value((void*)value) |
213 | { |
214 | ASSERT_VALID_CODE_POINTER(m_value); |
215 | } |
216 | |
217 | template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4> |
218 | FunctionPtr(returnType (FASTCALL *value)(argType1, argType2, argType3, argType4)) |
219 | : m_value((void*)value) |
220 | { |
221 | ASSERT_VALID_CODE_POINTER(m_value); |
222 | } |
223 | #endif |
224 | |
225 | template<typename FunctionType> |
226 | explicit FunctionPtr(FunctionType* value) |
// Using a C-style cast here to avoid compiler error on RVCT:
// Error: #694: reinterpret_cast cannot cast away const or other type qualifiers
// (I guess on RVCT function pointers have a different constness to GCC/MSVC?)
230 | : m_value((void*)value) |
231 | { |
232 | ASSERT_VALID_CODE_POINTER(m_value); |
233 | } |
234 | |
235 | void* value() const { return m_value; } |
236 | void* executableAddress() const { return m_value; } |
237 | |
238 | |
239 | private: |
240 | void* m_value; |
241 | }; |
242 | |
243 | // ReturnAddressPtr: |
244 | // |
245 | // ReturnAddressPtr should be used to wrap return addresses generated by processor |
// 'call' instructions executed in JIT code. We use return addresses to look up
247 | // exception and optimization information, and to repatch the call instruction |
248 | // that is the source of the return address. |
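//
// A sketch of typical use (the locals are hypothetical): a stub that captures
// the caller's return address can wrap it for later lookup or repatching:
//
//     void* rawReturnAddress = ...; // captured by a hypothetical trampoline
//     ReturnAddressPtr returnAddress(rawReturnAddress);
//     returnAddress.value();        // the same raw pointer back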
249 | class ReturnAddressPtr { |
250 | public: |
251 | ReturnAddressPtr() |
252 | : m_value(0) |
253 | { |
254 | } |
255 | |
256 | explicit ReturnAddressPtr(void* value) |
257 | : m_value(value) |
258 | { |
259 | ASSERT_VALID_CODE_POINTER(m_value); |
260 | } |
261 | |
262 | explicit ReturnAddressPtr(FunctionPtr function) |
263 | : m_value(function.value()) |
264 | { |
265 | ASSERT_VALID_CODE_POINTER(m_value); |
266 | } |
267 | |
268 | void* value() const { return m_value; } |
269 | |
270 | private: |
271 | void* m_value; |
272 | }; |
273 | |
274 | // MacroAssemblerCodePtr: |
275 | // |
276 | // MacroAssemblerCodePtr should be used to wrap pointers to JIT generated code. |
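//
// Illustrative behaviour (assuming an ARM_THUMB2 build; 'buffer' is a
// hypothetical 16-bit-aligned pointer into JIT memory):
//
//     MacroAssemblerCodePtr code(buffer);
//     code.executableAddress(); // buffer + 1: decorated, for loading into the PC
//     code.dataLocation();      // buffer: undecorated, for reading/writing the code
//
// On other architectures the two accessors return the same, undecorated pointer.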
277 | class MacroAssemblerCodePtr { |
278 | public: |
279 | MacroAssemblerCodePtr() |
280 | : m_value(0) |
281 | { |
282 | } |
283 | |
284 | explicit MacroAssemblerCodePtr(void* value) |
285 | #if CPU(ARM_THUMB2) |
286 | // Decorate the pointer as a thumb code pointer. |
287 | : m_value(reinterpret_cast<char*>(value) + 1) |
288 | #else |
289 | : m_value(value) |
290 | #endif |
291 | { |
292 | ASSERT_VALID_CODE_POINTER(m_value); |
293 | } |
294 | |
295 | static MacroAssemblerCodePtr createFromExecutableAddress(void* value) |
296 | { |
297 | ASSERT_VALID_CODE_POINTER(value); |
298 | MacroAssemblerCodePtr result; |
299 | result.m_value = value; |
300 | return result; |
301 | } |
302 | |
303 | #if ENABLE(LLINT) |
304 | static MacroAssemblerCodePtr createLLIntCodePtr(LLIntCode codeId) |
305 | { |
306 | return createFromExecutableAddress(LLInt::getCodePtr(codeId)); |
307 | } |
308 | #endif |
309 | |
310 | explicit MacroAssemblerCodePtr(ReturnAddressPtr ra) |
311 | : m_value(ra.value()) |
312 | { |
313 | ASSERT_VALID_CODE_POINTER(m_value); |
314 | } |
315 | |
316 | void* executableAddress() const { return m_value; } |
317 | #if CPU(ARM_THUMB2) |
318 | // To use this pointer as a data address remove the decoration. |
319 | void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return reinterpret_cast<char*>(m_value) - 1; } |
320 | #else |
321 | void* dataLocation() const { ASSERT_VALID_CODE_POINTER(m_value); return m_value; } |
322 | #endif |
323 | |
324 | bool operator!() const |
325 | { |
326 | return !m_value; |
327 | } |
328 | |
329 | private: |
330 | void* m_value; |
331 | }; |
332 | |
333 | |
inline FunctionPtr::FunctionPtr(MacroAssemblerCodePtr ptr)
335 | : m_value(ptr.executableAddress()) |
336 | { |
337 | } |
338 | |
339 | // MacroAssemblerCodeRef: |
340 | // |
341 | // A reference to a section of JIT generated code. A CodeRef consists of a |
342 | // pointer to the code, and a ref pointer to the pool from within which it |
343 | // was allocated. |
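//
// Typical creation paths, sketched with hypothetical locals:
//
//     MacroAssemblerCodeRef ref(memoryHandle); // shares ownership of the allocation
//     MacroAssemblerCodeRef immortal =
//         MacroAssemblerCodeRef::createSelfManagedCodeRef(codePtr); // takes no ownership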
344 | class MacroAssemblerCodeRef { |
345 | private: |
346 | // This is private because it's dangerous enough that we want uses of it |
347 | // to be easy to find - hence the static create method below. |
348 | explicit MacroAssemblerCodeRef(MacroAssemblerCodePtr codePtr) |
349 | : m_codePtr(codePtr) |
350 | { |
351 | ASSERT(m_codePtr); |
352 | } |
353 | |
354 | public: |
355 | MacroAssemblerCodeRef() |
356 | { |
357 | } |
358 | |
359 | MacroAssemblerCodeRef(PassRefPtr<ExecutableMemoryHandle> executableMemory) |
360 | : m_codePtr(executableMemory->codeStart()) |
361 | , m_executableMemory(executableMemory) |
362 | { |
363 | ASSERT(m_executableMemory->isManaged()); |
364 | ASSERT(m_executableMemory->codeStart()); |
365 | ASSERT(m_codePtr); |
366 | } |
367 | |
368 | // Use this only when you know that the codePtr refers to code that is |
369 | // already being kept alive through some other means. Typically this means |
370 | // that codePtr is immortal. |
371 | static MacroAssemblerCodeRef createSelfManagedCodeRef(MacroAssemblerCodePtr codePtr) |
372 | { |
373 | return MacroAssemblerCodeRef(codePtr); |
374 | } |
375 | |
376 | #if ENABLE(LLINT) |
377 | // Helper for creating self-managed code refs from LLInt. |
378 | static MacroAssemblerCodeRef createLLIntCodeRef(LLIntCode codeId) |
379 | { |
380 | return createSelfManagedCodeRef(MacroAssemblerCodePtr::createFromExecutableAddress(LLInt::getCodePtr(codeId))); |
381 | } |
382 | #endif |
383 | |
384 | ExecutableMemoryHandle* executableMemory() const |
385 | { |
386 | return m_executableMemory.get(); |
387 | } |
388 | |
389 | MacroAssemblerCodePtr code() const |
390 | { |
391 | return m_codePtr; |
392 | } |
393 | |
394 | size_t size() const |
395 | { |
396 | if (!m_executableMemory) |
397 | return 0; |
398 | return m_executableMemory->codeSize(); |
399 | } |
400 | |
401 | bool tryToDisassemble(const char* prefix) const |
402 | { |
403 | return JSC::tryToDisassemble(m_codePtr, size(), prefix, WTF::dataFile()); |
404 | } |
405 | |
406 | bool operator!() const { return !m_codePtr; } |
407 | |
408 | private: |
409 | MacroAssemblerCodePtr m_codePtr; |
410 | RefPtr<ExecutableMemoryHandle> m_executableMemory; |
411 | }; |
412 | |
413 | } // namespace JSC |
414 | |
415 | #endif // MacroAssemblerCodeRef_h |
416 | |