/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef AbstractMacroAssembler_h
#define AbstractMacroAssembler_h

#include "AssemblerBuffer.h"
#include "CodeLocation.h"
#include "MacroAssemblerCodeRef.h"
#include <wtf/CryptographicallyRandomNumber.h>
#include <wtf/Noncopyable.h>
#include <wtf/UnusedParam.h>

#if ENABLE(ASSEMBLER)

#if PLATFORM(QT)
#define ENABLE_JIT_CONSTANT_BLINDING 0
#endif

#ifndef ENABLE_JIT_CONSTANT_BLINDING
#define ENABLE_JIT_CONSTANT_BLINDING 1
#endif

namespace JSC {

class JumpReplacementWatchpoint;
template <typename, template <typename> class>
class LinkBufferBase;
template <typename>
class BranchCompactingLinkBuffer;
class Watchpoint;
namespace DFG {
struct OSRExit;
}

template <class AssemblerType>
class AbstractMacroAssembler {
public:
    friend class JITWriteBarrierBase;
    typedef AssemblerType AssemblerType_T;

    typedef MacroAssemblerCodePtr CodePtr;
    typedef MacroAssemblerCodeRef CodeRef;

#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    class Jump;
#endif

    typedef typename AssemblerType::RegisterID RegisterID;
    typedef typename AssemblerType::FPRegisterID FPRegisterID;

    // Section 1: MacroAssembler operand types
    //
    // The following types are used as operands to MacroAssembler operations,
    // describing immediate and memory operands to the instructions to be planted.

    enum Scale {
        TimesOne,
        TimesTwo,
        TimesFour,
        TimesEight,
    };
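
    // A Scale names the multiplier applied to the index register in a BaseIndex
    // address: TimesOne, TimesTwo, TimesFour and TimesEight scale the index by
    // 1, 2, 4 and 8 bytes respectively, matching the usual element sizes
    // (int8/int16/int32/int64 or pointer-sized slots).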

    // Address:
    //
    // Describes a simple base-offset address.
    struct Address {
        explicit Address(RegisterID base, int32_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    struct ExtendedAddress {
        explicit ExtendedAddress(RegisterID base, intptr_t offset = 0)
            : base(base)
            , offset(offset)
        {
        }

        RegisterID base;
        intptr_t offset;
    };

    // ImplicitAddress:
    //
    // This class is used for explicit 'load' and 'store' operations
    // (as opposed to situations in which a memory operand is provided
    // to a generic operation, such as an integer arithmetic instruction).
    //
    // In the case of a load (or store) operation we want to permit
    // addresses to be implicitly constructed, e.g. the two calls:
    //
    //     load32(Address(addrReg), destReg);
    //     load32(addrReg, destReg);
    //
    // are equivalent, so the explicit wrapping of the Address in the former
    // is unnecessary.
    struct ImplicitAddress {
        ImplicitAddress(RegisterID base)
            : base(base)
            , offset(0)
        {
        }

        ImplicitAddress(Address address)
            : base(address.base)
            , offset(address.offset)
        {
        }

        RegisterID base;
        int32_t offset;
    };

    // BaseIndex:
    //
    // Describes a complex addressing mode.
    struct BaseIndex {
        BaseIndex(RegisterID base, RegisterID index, Scale scale, int32_t offset = 0)
            : base(base)
            , index(index)
            , scale(scale)
            , offset(offset)
        {
        }

        RegisterID base;
        RegisterID index;
        Scale scale;
        int32_t offset;
    };
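
    // A BaseIndex denotes the effective address base + index * scale + offset.
    // For example, to load the i'th element of an int32 array whose base pointer
    // is in arrayReg and whose index is in indexReg (load32 is supplied by the
    // concrete MacroAssembler; register names here are illustrative):
    //
    //     load32(BaseIndex(arrayReg, indexReg, TimesFour), destReg);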

    // AbsoluteAddress:
    //
    // Describes a memory operand given by a pointer. For regular load & store
    // operations an unwrapped void* will be used, rather than using this.
    struct AbsoluteAddress {
        explicit AbsoluteAddress(const void* ptr)
            : m_ptr(ptr)
        {
        }

        const void* m_ptr;
    };
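
    // For example, to increment a counter at an address known at JIT time
    // (an add32 overload taking an AbsoluteAddress is supplied by the concrete
    // MacroAssembler; 'counter' is an illustrative name):
    //
    //     add32(TrustedImm32(1), AbsoluteAddress(&counter));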

    // TrustedImmPtr:
    //
    // A pointer sized immediate operand to an instruction - this is wrapped
    // in a class requiring explicit construction in order to differentiate
    // from pointers used as absolute addresses to memory operations.
    struct TrustedImmPtr {
        TrustedImmPtr() { }

        explicit TrustedImmPtr(const void* value)
            : m_value(value)
        {
        }

        // This is only here so that TrustedImmPtr(0) does not confuse the C++
        // overload handling rules.
        explicit TrustedImmPtr(int value)
            : m_value(0)
        {
            ASSERT_UNUSED(value, !value);
        }

        explicit TrustedImmPtr(size_t value)
            : m_value(reinterpret_cast<void*>(value))
        {
        }

        intptr_t asIntptr()
        {
            return reinterpret_cast<intptr_t>(m_value);
        }

        const void* m_value;
    };

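    // ImmPtr (and the analogous Imm32/Imm64 below) marks an immediate whose
    // value may be under the control of untrusted content. When JIT constant
    // blinding is enabled, these types inherit privately from their Trusted*
    // counterparts, so they cannot be passed where a trusted immediate is
    // expected: the MacroAssembler must explicitly unwrap them, and may blind
    // the value before planting it in the instruction stream. With blinding
    // disabled they are plain public synonyms for the trusted forms.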
    struct ImmPtr :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImmPtr
#else
        public TrustedImmPtr
#endif
    {
        explicit ImmPtr(const void* value)
            : TrustedImmPtr(value)
        {
        }

        TrustedImmPtr asTrustedImmPtr() { return *this; }
    };

    // TrustedImm32:
    //
    // A 32bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm32 {
        TrustedImm32() { }

        explicit TrustedImm32(int32_t value)
            : m_value(value)
        {
        }

#if !CPU(X86_64)
        explicit TrustedImm32(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int32_t m_value;
    };

    struct Imm32 :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImm32
#else
        public TrustedImm32
#endif
    {
        explicit Imm32(int32_t value)
            : TrustedImm32(value)
        {
        }
#if !CPU(X86_64)
        explicit Imm32(TrustedImmPtr ptr)
            : TrustedImm32(ptr)
        {
        }
#endif
        const TrustedImm32& asTrustedImm32() const { return *this; }
    };

    // TrustedImm64:
    //
    // A 64bit immediate operand to an instruction - this is wrapped in a
    // class requiring explicit construction in order to prevent RegisterIDs
    // (which are implemented as an enum) from accidentally being passed as
    // immediate values.
    struct TrustedImm64 {
        TrustedImm64() { }

        explicit TrustedImm64(int64_t value)
            : m_value(value)
        {
        }

#if CPU(X86_64) || CPU(ARM64)
        explicit TrustedImm64(TrustedImmPtr ptr)
            : m_value(ptr.asIntptr())
        {
        }
#endif

        int64_t m_value;
    };

    struct Imm64 :
#if ENABLE(JIT_CONSTANT_BLINDING)
        private TrustedImm64
#else
        public TrustedImm64
#endif
    {
        explicit Imm64(int64_t value)
            : TrustedImm64(value)
        {
        }
#if CPU(X86_64) || CPU(ARM64)
        explicit Imm64(TrustedImmPtr ptr)
            : TrustedImm64(ptr)
        {
        }
#endif
        const TrustedImm64& asTrustedImm64() const { return *this; }
    };

    // Section 2: MacroAssembler code buffer handles
    //
    // The following types are used to reference items in the code buffer
    // during JIT code generation. For example, the type Jump is used to
    // track the location of a jump instruction so that it may later be
    // linked to a label marking its destination.

    // Label:
    //
    // A Label records a point in the generated instruction stream, typically such that
    // it may be used as a destination for a jump.
    class Label {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend struct DFG::OSRExit;

#if CPU(ARM_THUMB2) || CPU(ARM64)
        using Jump = typename AssemblerType::template Jump<Label>;
        friend Jump;
#else
        friend class Jump;
#endif
        friend class JumpReplacementWatchpoint;
        friend class MacroAssemblerCodeRef;
        template <typename, template <typename> class> friend class LinkBufferBase;
        friend class Watchpoint;

    public:
        Label()
        {
        }

        Label(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

        const AssemblerLabel& label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };
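
    // Typical use: record a Label at the head of a loop, then link a later
    // branch back to it (branch32 is supplied by the concrete MacroAssembler;
    // register names here are illustrative):
    //
    //     Label loopTop = label();
    //     ...
    //     branch32(NotEqual, counterReg, TrustedImm32(0)).linkTo(loopTop, this);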

    // ConvertibleLoadLabel:
    //
    // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr
    // so that:
    //
    //     loadPtr(Address(a, i), b)
    //
    // becomes:
    //
    //     addPtr(TrustedImmPtr(i), a, b)
    class ConvertibleLoadLabel {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        template <typename, template <typename> class> friend class LinkBufferBase;

    public:
        ConvertibleLoadLabel()
        {
        }

        ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.labelIgnoringWatchpoints())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelPtr:
    //
    // A DataLabelPtr is used to refer to a location in the code containing a pointer to be
    // patched after the code has been generated.
    class DataLabelPtr {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        template <typename, template <typename> class> friend class LinkBufferBase;

    public:
        DataLabelPtr()
        {
        }

        DataLabelPtr(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
    };

    // DataLabel32:
    //
    // A DataLabel32 is used to refer to a location in the code containing a
    // 32-bit constant to be patched after the code has been generated.
    class DataLabel32 {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        template <typename, template <typename> class> friend class LinkBufferBase;

    public:
        DataLabel32()
        {
        }

        DataLabel32(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        AssemblerLabel label() const { return m_label; }

    private:
        AssemblerLabel m_label;
    };

    // DataLabelCompact:
    //
    // A DataLabelCompact is used to refer to a location in the code containing a
    // compact immediate to be patched after the code has been generated.
    class DataLabelCompact {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        template <typename, template <typename> class> friend class LinkBufferBase;

    public:
        DataLabelCompact()
        {
        }

        DataLabelCompact(AbstractMacroAssembler<AssemblerType>* masm)
            : m_label(masm->m_assembler.label())
        {
        }

        DataLabelCompact(AssemblerLabel label)
            : m_label(label)
        {
        }

    private:
        AssemblerLabel m_label;
    };

#if CPU(ARM_THUMB2) || CPU(ARM64)
    using Jump = typename AssemblerType::template Jump<Label>;
    friend Jump;
#endif

    // Call:
    //
    // A Call object is a reference to a call instruction that has been planted
    // into the code buffer - it is typically used to link the call, setting the
    // relative offset such that when executed it will call to the desired
    // destination.
    class Call {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;

    public:
        enum Flags {
            None = 0x0,
            Linkable = 0x1,
            Near = 0x2,
            LinkableNear = 0x3,
        };

        Call()
            : m_flags(None)
        {
        }

        Call(AssemblerLabel jmp, Flags flags)
            : m_label(jmp)
            , m_flags(flags)
        {
        }

        bool isFlagSet(Flags flag)
        {
            return m_flags & flag;
        }

        static Call fromTailJump(Jump jump)
        {
            return Call(jump.m_label, Linkable);
        }

        AssemblerLabel m_label;
    private:
        Flags m_flags;
    };

    // Jump:
    //
    // A Jump object is a reference to a jump instruction that has been planted
    // into the code buffer - it is typically used to link the jump, setting the
    // relative offset such that when executed it will jump to the desired
    // destination.
#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    class Jump {
        template<class TemplateAssemblerType>
        friend class AbstractMacroAssembler;
        friend class Call;
        friend struct DFG::OSRExit;
        template <typename, template <typename> class> friend class LinkBufferBase;

    public:
        Jump()
        {
        }

#if CPU(ARM_THUMB2)
        // FIXME: this information should be stored in the instruction stream, not in the Jump object.
        Jump(AssemblerLabel jmp, ARMv7Assembler::JumpType type = ARMv7Assembler::JumpNoCondition, ARMv7Assembler::Condition condition = ARMv7Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }
#elif CPU(ARM64)
        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type = ARM64Assembler::JumpNoCondition, ARM64Assembler::Condition condition = ARM64Assembler::ConditionInvalid)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
        {
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, bool is64Bit, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_is64Bit(is64Bit)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpCompareAndBranch) || (type == ARM64Assembler::JumpCompareAndBranchFixedSize));
        }

        Jump(AssemblerLabel jmp, ARM64Assembler::JumpType type, ARM64Assembler::Condition condition, unsigned bitNumber, ARM64Assembler::RegisterID compareRegister)
            : m_label(jmp)
            , m_type(type)
            , m_condition(condition)
            , m_bitNumber(bitNumber)
            , m_compareRegister(compareRegister)
        {
            ASSERT((type == ARM64Assembler::JumpTestBit) || (type == ARM64Assembler::JumpTestBitFixedSize));
        }
#else
        Jump(AssemblerLabel jmp)
            : m_label(jmp)
        {
        }
#endif

        Label label() const
        {
            Label result;
            result.m_label = m_label;
            return result;
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(m_label.m_offset, masm->debugOffset());
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, masm->m_assembler.label(), m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, masm->m_assembler.label());
#endif
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm) const
        {
#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
            masm->checkRegisterAllocationAgainstBranchRange(label.m_label.m_offset, m_label.m_offset);
#endif

#if CPU(ARM_THUMB2)
            masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#elif CPU(ARM64)
            if ((m_type == ARM64Assembler::JumpCompareAndBranch) || (m_type == ARM64Assembler::JumpCompareAndBranchFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_is64Bit, m_compareRegister);
            else if ((m_type == ARM64Assembler::JumpTestBit) || (m_type == ARM64Assembler::JumpTestBitFixedSize))
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition, m_bitNumber, m_compareRegister);
            else
                masm->m_assembler.linkJump(m_label, label.m_label, m_type, m_condition);
#else
            masm->m_assembler.linkJump(m_label, label.m_label);
#endif
        }

        bool isSet() const { return m_label.isSet(); }

    private:
        AssemblerLabel m_label;
#if CPU(ARM_THUMB2)
        ARMv7Assembler::JumpType m_type;
        ARMv7Assembler::Condition m_condition;
#endif
#if CPU(ARM64)
        ARM64Assembler::JumpType m_type;
        ARM64Assembler::Condition m_condition;
        bool m_is64Bit;
        unsigned m_bitNumber;
        ARM64Assembler::RegisterID m_compareRegister;
#endif
    };
#endif

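    // PatchableJump:
    //
    // A PatchableJump wraps a Jump that is guaranteed to have been emitted in
    // a form that can safely be repatched after the code has been finalized
    // (see the patchableBranch* operations on the concrete MacroAssemblers).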
    struct PatchableJump {
        PatchableJump()
        {
        }

        explicit PatchableJump(Jump jump)
            : m_jump(jump)
        {
        }

        operator Jump&() { return m_jump; }

        Jump m_jump;
    };

    // JumpList:
    //
    // A JumpList is a set of Jump objects.
    // All jumps in the set will be linked to the same destination.
    class JumpList {
        template <typename, template <typename> class> friend class LinkBufferBase;

    public:
        typedef Vector<Jump, 2> JumpVector;

        JumpList() { }

        JumpList(Jump jump)
        {
            append(jump);
        }

        void link(AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].link(masm);
            m_jumps.clear();
        }

        void linkTo(Label label, AbstractMacroAssembler<AssemblerType>* masm)
        {
            size_t size = m_jumps.size();
            for (size_t i = 0; i < size; ++i)
                m_jumps[i].linkTo(label, masm);
            m_jumps.clear();
        }

        void append(Jump jump)
        {
            m_jumps.append(jump);
        }

        void append(const JumpList& other)
        {
            m_jumps.append(other.m_jumps.begin(), other.m_jumps.size());
        }

        bool empty()
        {
            return !m_jumps.size();
        }

        void clear()
        {
            m_jumps.clear();
        }

        const JumpVector& jumps() const { return m_jumps; }

    private:
        JumpVector m_jumps;
    };
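
    // Typical use: gather every branch that leaves a fast path, then bind them
    // all to the slow path at once (names here are illustrative):
    //
    //     JumpList failureCases;
    //     failureCases.append(branch32(NotEqual, tagReg, TrustedImm32(expectedTag)));
    //     failureCases.append(branchTest32(Zero, valueReg));
    //     ...
    //     failureCases.link(this); // every collected jump now targets this point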

    // Section 3: Misc admin methods
#if ENABLE(DFG_JIT)
    Label labelIgnoringWatchpoints()
    {
        Label result;
        result.m_label = m_assembler.labelIgnoringWatchpoints();
        return result;
    }
#else
    Label labelIgnoringWatchpoints()
    {
        return label();
    }
#endif

    Label label()
    {
        return Label(this);
    }

    void padBeforePatch()
    {
        // Rely on the fact that asking for a label already does the padding.
        (void)label();
    }

    Label watchpointLabel()
    {
        Label result;
        result.m_label = m_assembler.labelForWatchpoint();
        return result;
    }

    Label align()
    {
        m_assembler.align(16);
        return Label(this);
    }

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    class RegisterAllocationOffset {
    public:
        RegisterAllocationOffset(unsigned offset)
            : m_offset(offset)
        {
        }

        void check(unsigned low, unsigned high)
        {
            RELEASE_ASSERT_WITH_MESSAGE(!(low <= m_offset && m_offset <= high), "Unsafe branch over register allocation at instruction offset %u in jump offset range %u..%u", m_offset, low, high);
        }

    private:
        unsigned m_offset;
    };

    void addRegisterAllocationAtOffset(unsigned offset)
    {
        m_registerAllocationForOffsets.append(RegisterAllocationOffset(offset));
    }

    void clearRegisterAllocationOffsets()
    {
        m_registerAllocationForOffsets.clear();
    }

    void checkRegisterAllocationAgainstBranchRange(unsigned offset1, unsigned offset2)
    {
        if (offset1 > offset2)
            std::swap(offset1, offset2);

        size_t size = m_registerAllocationForOffsets.size();
        for (size_t i = 0; i < size; ++i)
            m_registerAllocationForOffsets[i].check(offset1, offset2);
    }
#endif

    template<typename T, typename U>
    static ptrdiff_t differenceBetween(T from, U to)
    {
        return AssemblerType::getDifferenceBetweenLabels(from.m_label, to.m_label);
    }

    static ptrdiff_t differenceBetweenCodePtr(const MacroAssemblerCodePtr& a, const MacroAssemblerCodePtr& b)
    {
        return reinterpret_cast<ptrdiff_t>(b.executableAddress()) - reinterpret_cast<ptrdiff_t>(a.executableAddress());
    }

    unsigned debugOffset() { return m_assembler.debugOffset(); }

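    // Flush the CPU instruction cache for the given range. On architectures
    // with split instruction/data caches (e.g. ARM) freshly written code must
    // be flushed before it is executed; where the caches are coherent (x86)
    // the underlying implementation is typically a no-op.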
    ALWAYS_INLINE static void cacheFlush(void* code, size_t size)
    {
        AssemblerType::cacheFlush(code, size);
    }

protected:
    AbstractMacroAssembler()
        : m_randomSource(cryptographicallyRandomNumber())
    {
    }

    AssemblerType m_assembler;

    uint32_t random()
    {
        return m_randomSource.getUint32();
    }

    WeakRandom m_randomSource;

#if ENABLE(DFG_REGISTER_ALLOCATION_VALIDATION)
    Vector<RegisterAllocationOffset, 10> m_registerAllocationForOffsets;
#endif

#if ENABLE(JIT_CONSTANT_BLINDING)
    static bool scratchRegisterForBlinding() { return false; }
    static bool shouldBlindForSpecificArch(uint32_t) { return true; }
    static bool shouldBlindForSpecificArch(uint64_t) { return true; }
#endif

    template <typename, template <typename> class> friend class LinkBufferBase;
    template <typename> friend class BranchCompactingLinkBuffer;

    static void linkJump(void* code, Jump jump, CodeLocationLabel target)
    {
        AssemblerType::linkJump(code, jump.m_label, target.dataLocation());
    }

    static void linkPointer(void* code, AssemblerLabel label, void* value)
    {
        AssemblerType::linkPointer(code, label, value);
    }

    static void* getLinkerAddress(void* code, AssemblerLabel label)
    {
        return AssemblerType::getRelocatedAddress(code, label);
    }

    static unsigned getLinkerCallReturnOffset(Call call)
    {
        return AssemblerType::getCallReturnOffset(call.m_label);
    }

    static void repatchJump(CodeLocationJump jump, CodeLocationLabel destination)
    {
        AssemblerType::relinkJump(jump.dataLocation(), destination.dataLocation());
    }

    static void repatchNearCall(CodeLocationNearCall nearCall, CodeLocationLabel destination)
    {
        AssemblerType::relinkCall(nearCall.dataLocation(), destination.executableAddress());
    }

    static void repatchCompact(CodeLocationDataLabelCompact dataLabelCompact, int32_t value)
    {
        AssemblerType::repatchCompact(dataLabelCompact.dataLocation(), value);
    }

    static void repatchInt32(CodeLocationDataLabel32 dataLabel32, int32_t value)
    {
        AssemblerType::repatchInt32(dataLabel32.dataLocation(), value);
    }

    static void repatchPointer(CodeLocationDataLabelPtr dataLabelPtr, void* value)
    {
        AssemblerType::repatchPointer(dataLabelPtr.dataLocation(), value);
    }

    static void* readPointer(CodeLocationDataLabelPtr dataLabelPtr)
    {
        return AssemblerType::readPointer(dataLabelPtr.dataLocation());
    }

    static void replaceWithLoad(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithLoad(label.dataLocation());
    }

    static void replaceWithAddressComputation(CodeLocationConvertibleLoad label)
    {
        AssemblerType::replaceWithAddressComputation(label.dataLocation());
    }
};

} // namespace JSC

#endif // ENABLE(ASSEMBLER)

#endif // AbstractMacroAssembler_h