/*
 * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssembler_h
#define MacroAssembler_h

#include <wtf/Platform.h>

#if ENABLE(ASSEMBLER)

#include "MacroAssemblerARMv7.h"
#include "MacroAssemblerARM64.h"

#if CPU(ARM_THUMB2)
namespace JSC { typedef MacroAssemblerARMv7 MacroAssemblerBase; };

#elif CPU(ARM64)
namespace JSC { typedef MacroAssemblerARM64 MacroAssemblerBase; };

#elif CPU(MIPS)
#include "MacroAssemblerMIPS.h"
namespace JSC {
typedef MacroAssemblerMIPS MacroAssemblerBase;
};

#elif CPU(X86)
#include "MacroAssemblerX86.h"
namespace JSC { typedef MacroAssemblerX86 MacroAssemblerBase; };

#elif CPU(X86_64)
#include "MacroAssemblerX86_64.h"
namespace JSC { typedef MacroAssemblerX86_64 MacroAssemblerBase; };

#else
#error "The MacroAssembler is not supported on this platform."
#endif

namespace JSC {

template <typename MacroAssemblerBase>
class MacroAssembler : public MacroAssemblerBase {
public:

    using DoubleCondition = typename MacroAssemblerBase::DoubleCondition;
    using ResultCondition = typename MacroAssemblerBase::ResultCondition;
    using RelationalCondition = typename MacroAssemblerBase::RelationalCondition;
    using RegisterID = typename MacroAssemblerBase::RegisterID;
    using Address = typename MacroAssemblerBase::Address;
    using ExtendedAddress = typename MacroAssemblerBase::ExtendedAddress;
    using BaseIndex = typename MacroAssemblerBase::BaseIndex;
    using ImplicitAddress = typename MacroAssemblerBase::ImplicitAddress;
    using AbsoluteAddress = typename MacroAssemblerBase::AbsoluteAddress;
    using TrustedImm32 = typename MacroAssemblerBase::TrustedImm32;
    using TrustedImm64 = typename MacroAssemblerBase::TrustedImm64;
    using TrustedImmPtr = typename MacroAssemblerBase::TrustedImmPtr;
    using Imm32 = typename MacroAssemblerBase::Imm32;
    using Imm64 = typename MacroAssemblerBase::Imm64;
    using ImmPtr = typename MacroAssemblerBase::ImmPtr;
    using Label = typename MacroAssemblerBase::Label;
    using DataLabelPtr = typename MacroAssemblerBase::DataLabelPtr;
    using DataLabel32 = typename MacroAssemblerBase::DataLabel32;
    using DataLabelCompact = typename MacroAssemblerBase::DataLabelCompact;
    using Jump = typename MacroAssemblerBase::Jump;
    using PatchableJump = typename MacroAssemblerBase::PatchableJump;
    using MacroAssemblerBase::PointerSize;

    using MacroAssemblerBase::pop;
    using MacroAssemblerBase::jump;
    using MacroAssemblerBase::branch32;
    using MacroAssemblerBase::move;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::xor32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::load32;


#if CPU(X86_64) || CPU(ARM64)
    using MacroAssemblerBase::add64;
    using MacroAssemblerBase::sub64;
    using MacroAssemblerBase::xor64;
    using MacroAssemblerBase::load64;
    using MacroAssemblerBase::store64;
#endif

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::add32;
    using MacroAssemblerBase::and32;
    using MacroAssemblerBase::branchAdd32;
    using MacroAssemblerBase::branchMul32;
    using MacroAssemblerBase::branchSub32;
    using MacroAssemblerBase::lshift32;
    using MacroAssemblerBase::or32;
    using MacroAssemblerBase::rshift32;
    using MacroAssemblerBase::store32;
    using MacroAssemblerBase::sub32;
    using MacroAssemblerBase::urshift32;
    using MacroAssemblerBase::xor32;
#endif

    static const double twoToThe32; // This is super useful for some double code.

    // Utilities used by the DFG JIT.
#if ENABLE(DFG_JIT) || ENABLE(DFG_JIT_UTILITY_METHODS)
    using MacroAssemblerBase::invert;

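    // Inverting a double condition swaps it with its "OrUnordered" counterpart, so that
    // for any pair of operands (NaN included) exactly one of the original condition and
    // its inverse holds. For example, DoubleEqual is false when either operand is NaN,
    // so its inverse has to be DoubleNotEqualOrUnordered, which is true in that case.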
    static DoubleCondition invert(DoubleCondition cond)
    {
        switch (cond) {
        case DoubleCondition::DoubleEqual:
            return DoubleCondition::DoubleNotEqualOrUnordered;
        case DoubleCondition::DoubleNotEqual:
            return DoubleCondition::DoubleEqualOrUnordered;
        case DoubleCondition::DoubleGreaterThan:
            return DoubleCondition::DoubleLessThanOrEqualOrUnordered;
        case DoubleCondition::DoubleGreaterThanOrEqual:
            return DoubleCondition::DoubleLessThanOrUnordered;
        case DoubleCondition::DoubleLessThan:
            return DoubleCondition::DoubleGreaterThanOrEqualOrUnordered;
        case DoubleCondition::DoubleLessThanOrEqual:
            return DoubleCondition::DoubleGreaterThanOrUnordered;
        case DoubleCondition::DoubleEqualOrUnordered:
            return DoubleCondition::DoubleNotEqual;
        case DoubleCondition::DoubleNotEqualOrUnordered:
            return DoubleCondition::DoubleEqual;
        case DoubleCondition::DoubleGreaterThanOrUnordered:
            return DoubleCondition::DoubleLessThanOrEqual;
        case DoubleCondition::DoubleGreaterThanOrEqualOrUnordered:
            return DoubleCondition::DoubleLessThan;
        case DoubleCondition::DoubleLessThanOrUnordered:
            return DoubleCondition::DoubleGreaterThanOrEqual;
        case DoubleCondition::DoubleLessThanOrEqualOrUnordered:
            return DoubleCondition::DoubleGreaterThan;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return DoubleCondition::DoubleEqual; // make compiler happy
        }
    }

    static bool isInvertible(ResultCondition cond)
    {
        switch (cond) {
        case ResultCondition::Zero:
        case ResultCondition::NonZero:
            return true;
        default:
            return false;
        }
    }

    static ResultCondition invert(ResultCondition cond)
    {
        switch (cond) {
        case ResultCondition::Zero:
            return ResultCondition::NonZero;
        case ResultCondition::NonZero:
            return ResultCondition::Zero;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            return ResultCondition::Zero; // Make compiler happy for release builds.
        }
    }
#endif

    // Platform agnostic convenience functions,
    // described in terms of other macro assembly methods.
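    // For example, poke(reg, 2) stores reg into the stack slot at
    // stackPointerRegister + 2 * PointerSize, peek(reg, 2) loads it back, and pop()
    // simply discards the top slot by bumping the stack pointer without reading it.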
    void pop()
    {
        addPtr(TrustedImm32(PointerSize), MacroAssemblerBase::stackPointerRegister);
    }

    void peek(RegisterID dest, int index = 0)
    {
        loadPtr(Address(MacroAssemblerBase::stackPointerRegister, (index * PointerSize)), dest);
    }

    Address addressForPoke(int index)
    {
        return Address(MacroAssemblerBase::stackPointerRegister, (index * PointerSize));
    }

    void poke(RegisterID src, int index = 0)
    {
        storePtr(src, addressForPoke(index));
    }

    void poke(TrustedImm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(TrustedImmPtr imm, int index = 0)
    {
        storePtr(imm, addressForPoke(index));
    }

#if CPU(X86_64) || CPU(ARM64)
    void peek64(RegisterID dest, int index = 0)
    {
        load64(Address(MacroAssemblerBase::stackPointerRegister, (index * sizeof(void*))), dest);
    }

    void poke(TrustedImm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }

    void poke64(RegisterID src, int index = 0)
    {
        store64(src, addressForPoke(index));
    }
#endif

    // Backwards branches, these are currently all implemented using existing forwards branch mechanisms.
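    // For illustration: branch32(Equal, reg, imm, loopTop) emits the same compare-and-branch
    // as the Jump-returning overload and immediately links it back to the already-bound
    // label 'loopTop' (a hypothetical label name).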
    void branchPtr(RelationalCondition cond, RegisterID op1, TrustedImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }
    void branchPtr(RelationalCondition cond, RegisterID op1, ImmPtr imm, Label target)
    {
        branchPtr(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, RegisterID op2, Label target)
    {
        branch32(cond, op1, op2).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, TrustedImm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID op1, Imm32 imm, Label target)
    {
        branch32(cond, op1, imm).linkTo(target, this);
    }

    void branch32(RelationalCondition cond, RegisterID left, Address right, Label target)
    {
        branch32(cond, left, right).linkTo(target, this);
    }

    Jump branch32(RelationalCondition cond, TrustedImm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    Jump branch32(RelationalCondition cond, Imm32 left, RegisterID right)
    {
        return branch32(commute(cond), right, left);
    }

    void branchTestPtr(ResultCondition cond, RegisterID reg, Label target)
    {
        branchTestPtr(cond, reg).linkTo(target, this);
    }

#if !CPU(ARM_THUMB2) && !CPU(ARM64)
    PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtr(cond, left, right));
    }

    PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        return PatchableJump(branchPtrWithPatch(cond, left, dataLabel, initialRightValue));
    }

    PatchableJump patchableJump()
    {
        return PatchableJump(jump());
    }

    PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return PatchableJump(branchTest32(cond, reg, mask));
    }
#endif // !CPU(ARM_THUMB2) && !CPU(ARM64)

#if !CPU(ARM)
    PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
    {
        return PatchableJump(branch32(cond, reg, imm));
    }
#endif // !CPU(ARM)

    void jump(Label target)
    {
        jump().linkTo(target, this);
    }

    // Commute a relational condition, returns a new condition that will produce
    // the same results given the same inputs but with their positions exchanged.
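    // For example, commute(LessThan) is GreaterThan, since "a < b" and "b > a" are the
    // same test; Equal and NotEqual are symmetric in their operands and are returned
    // unchanged.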
    static RelationalCondition commute(RelationalCondition condition)
    {
        switch (condition) {
        case RelationalCondition::Above:
            return RelationalCondition::Below;
        case RelationalCondition::AboveOrEqual:
            return RelationalCondition::BelowOrEqual;
        case RelationalCondition::Below:
            return RelationalCondition::Above;
        case RelationalCondition::BelowOrEqual:
            return RelationalCondition::AboveOrEqual;
        case RelationalCondition::GreaterThan:
            return RelationalCondition::LessThan;
        case RelationalCondition::GreaterThanOrEqual:
            return RelationalCondition::LessThanOrEqual;
        case RelationalCondition::LessThan:
            return RelationalCondition::GreaterThan;
        case RelationalCondition::LessThanOrEqual:
            return RelationalCondition::GreaterThanOrEqual;
        default:
            break;
        }

        ASSERT(condition == RelationalCondition::Equal || condition == RelationalCondition::NotEqual);
        return condition;
    }

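    // With BlindingModulus == 64, shouldConsiderBlinding() is true only when the low six
    // bits of random() are all zero, so at most about one candidate constant in 64 is
    // actually blinded along the slow paths below.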
    static const unsigned BlindingModulus = 64;
    bool shouldConsiderBlinding()
    {
        return !(this->random() & (BlindingModulus - 1));
    }

    // Ptr methods
    // On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
    // FIXME: should this use a test for 32-bitness instead of this specific exception?
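    // For example, on x86 addPtr()/loadPtr()/storePtr() lower directly to
    // add32()/load32()/store32(), and pointer-sized immediates are simply wrapped as
    // 32-bit immediates via TrustedImm32(imm).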
#if !CPU(X86_64) && !CPU(ARM64)
    void addPtr(Address src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(RegisterID src, RegisterID dest)
    {
        add32(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add32(imm, srcDest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add32(TrustedImm32(imm), dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add32(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add32(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and32(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and32(imm, srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg32(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or32(src, dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or32(op1, op2, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or32(TrustedImm32(imm), dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub32(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub32(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub32(TrustedImm32(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor32(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor32(imm, srcDest);
    }


    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load32(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load32WithCompactAddressOffsetPatch(address, dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        move(Imm32(imm.asTrustedImmPtr()), dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(cond, left, right, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store32(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store32(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store32(TrustedImm32(imm), address);
    }

    void storePtr(ImmPtr imm, Address address)
    {
        store32(Imm32(imm.asTrustedImmPtr()), address);
    }

    void storePtr(TrustedImmPtr imm, void* address)
    {
        store32(TrustedImm32(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store32WithAddressOffsetPatch(src, address);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        return branch32(cond, left, Imm32(right.asTrustedImmPtr()));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch32(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, TrustedImmPtr right)
    {
        return branch32(cond, left, TrustedImm32(right));
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, src, dest);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest32(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, imm, dest);
    }
    using MacroAssemblerBase::branchTest8;
    Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return MacroAssemblerBase::branchTest8(cond, Address(address.base, address.offset), mask);
    }
#else
    void addPtr(RegisterID src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(Address src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void addPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        add64(imm, src, dest);
    }

    void addPtr(TrustedImm32 imm, Address address)
    {
        add64(imm, address);
    }

    void addPtr(AbsoluteAddress src, RegisterID dest)
    {
        add64(src, dest);
    }

    void addPtr(TrustedImmPtr imm, RegisterID dest)
    {
        add64(TrustedImm64(imm), dest);
    }

    void addPtr(TrustedImm32 imm, AbsoluteAddress address)
    {
        add64(imm, address);
    }

    void andPtr(RegisterID src, RegisterID dest)
    {
        and64(src, dest);
    }

    void andPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        and64(imm, srcDest);
    }

    void negPtr(RegisterID dest)
    {
        neg64(dest);
    }

    void orPtr(RegisterID src, RegisterID dest)
    {
        or64(src, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest);
    }

    void orPtr(TrustedImmPtr imm, RegisterID dest)
    {
        or64(TrustedImm64(imm), dest);
    }

    void orPtr(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        or64(op1, op2, dest);
    }

    void orPtr(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        or64(imm, src, dest);
    }

    void rotateRightPtr(TrustedImm32 imm, RegisterID srcDst)
    {
        rotateRight64(imm, srcDst);
    }

    void subPtr(RegisterID src, RegisterID dest)
    {
        sub64(src, dest);
    }

    void subPtr(TrustedImm32 imm, RegisterID dest)
    {
        sub64(imm, dest);
    }

    void subPtr(TrustedImmPtr imm, RegisterID dest)
    {
        sub64(TrustedImm64(imm), dest);
    }

    void xorPtr(RegisterID src, RegisterID dest)
    {
        xor64(src, dest);
    }

    void xorPtr(RegisterID src, Address dest)
    {
        xor64(src, dest);
    }

    void xorPtr(TrustedImm32 imm, RegisterID srcDest)
    {
        xor64(imm, srcDest);
    }

    void loadPtr(ImplicitAddress address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(BaseIndex address, RegisterID dest)
    {
        load64(address, dest);
    }

    void loadPtr(const void* address, RegisterID dest)
    {
        load64(address, dest);
    }

    DataLabel32 loadPtrWithAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithAddressOffsetPatch(address, dest);
    }

    DataLabelCompact loadPtrWithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        return load64WithCompactAddressOffsetPatch(address, dest);
    }

    void storePtr(RegisterID src, ImplicitAddress address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, BaseIndex address)
    {
        store64(src, address);
    }

    void storePtr(RegisterID src, void* address)
    {
        store64(src, address);
    }

    void storePtr(TrustedImmPtr imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm), address);
    }

    void storePtr(TrustedImmPtr imm, BaseIndex address)
    {
        store64(TrustedImm64(imm), address);
    }

    DataLabel32 storePtrWithAddressOffsetPatch(RegisterID src, Address address)
    {
        return store64WithAddressOffsetPatch(src, address);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void comparePtr(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        compare64(cond, left, right, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    void testPtr(ResultCondition cond, RegisterID reg, RegisterID mask, RegisterID dest)
    {
        test64(cond, reg, mask, dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        return this->branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, TrustedImmPtr right)
    {
        return this->branch64(cond, left, TrustedImm64(right));
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, Address right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        return branch64(cond, left, right);
    }

    Jump branchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
    {
        return branch64(cond, left, TrustedImm64(right));
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        return branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        return this->branchTest64(cond, reg, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, Address address, RegisterID reg)
    {
        return branchTest64(cond, address, reg);
    }

    Jump branchTestPtr(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchTestPtr(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        return branchTest64(cond, address, mask);
    }

    Jump branchAddPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd64(cond, imm, dest);
    }

    Jump branchAddPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub64(cond, imm, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub64(cond, src, dest);
    }

    Jump branchSubPtr(ResultCondition cond, RegisterID src1, TrustedImm32 src2, RegisterID dest)
    {
        return branchSub64(cond, src1, src2, dest);
    }
#endif // !CPU(X86_64) && !CPU(ARM64)

#if ENABLE(JIT_CONSTANT_BLINDING)
    using MacroAssemblerBase::and64;
    using MacroAssemblerBase::convertInt32ToDouble;
    using MacroAssemblerBase::store64;
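    // Summary of the policy below: finite, normalised doubles whose fractional part is a
    // multiple of 1/8 are emitted unblinded when their magnitude is at most 0xff and
    // blinded otherwise; values failing the earlier sanity checks are only blinded
    // occasionally, as decided by shouldConsiderBlinding().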
    bool shouldBlindDouble(double value)
    {
        // Don't trust NaN or +/-Infinity
        if (!std::isfinite(value))
            return shouldConsiderBlinding();

        // Try to force normalisation, and check that there's no change
        // in the bit pattern
        if (bitwise_cast<uint64_t>(value * 1.0) != bitwise_cast<uint64_t>(value))
            return shouldConsiderBlinding();

        value = fabs(value);
        // Only allow a limited set of fractional components
        double scaledValue = value * 8;
        if (scaledValue / 8 != value)
            return shouldConsiderBlinding();
        double frac = scaledValue - floor(scaledValue);
        if (frac != 0.0)
            return shouldConsiderBlinding();

        return value > 0xff;
    }

    bool shouldBlind(ImmPtr imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImmPtr {
        RotatedImmPtr(uintptr_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImmPtr value;
        TrustedImm32 rotation;
    };

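    // rotationBlindConstant() encodes a pointer as the pair (value rotated left by a
    // random amount, rotation count); loadRotationBlindedConstant() undoes this with a
    // move followed by a right-rotate of the same amount, so the raw pointer never
    // appears verbatim as an immediate in the emitted code.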
    RotatedImmPtr rotationBlindConstant(ImmPtr imm)
    {
        uint8_t rotation = random() % (sizeof(void*) * 8);
        uintptr_t value = imm.asTrustedImmPtr().asIntptr();
        value = (value << rotation) | (value >> (sizeof(void*) * 8 - rotation));
        return RotatedImmPtr(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImmPtr constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRightPtr(constant.rotation, dest);
    }

    bool shouldBlind(Imm64 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#endif

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint64_t value = imm.asTrustedImm64().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffffL:
        case 0xffffffffffL:
        case 0xffffffffffffL:
        case 0xffffffffffffffL:
        case 0xffffffffffffffffL:
            return false;
        default: {
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;

            JSValue jsValue = JSValue::decode(value);
            if (jsValue.isInt32())
                return shouldBlind(Imm32(jsValue.asInt32()));
            if (jsValue.isDouble() && !shouldBlindDouble(jsValue.asDouble()))
                return false;

            if (!shouldBlindDouble(bitwise_cast<double>(value)))
                return false;
        }
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
    }

    struct RotatedImm64 {
        RotatedImm64(uint64_t v1, uint8_t v2)
            : value(v1)
            , rotation(v2)
        {
        }
        TrustedImm64 value;
        TrustedImm32 rotation;
    };

    RotatedImm64 rotationBlindConstant(Imm64 imm)
    {
        uint8_t rotation = random() % (sizeof(int64_t) * 8);
        uint64_t value = imm.asTrustedImm64().m_value;
        value = (value << rotation) | (value >> (sizeof(int64_t) * 8 - rotation));
        return RotatedImm64(value, rotation);
    }

    void loadRotationBlindedConstant(RotatedImm64 constant, RegisterID dest)
    {
        move(constant.value, dest);
        rotateRight64(constant.rotation, dest);
    }

    void convertInt32ToDouble(Imm32 imm, FPRegisterID dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
            convertInt32ToDouble(scratchRegister, dest);
        } else
            convertInt32ToDouble(imm.asTrustedImm32(), dest);
    }

    void move(ImmPtr imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImmPtr(), dest);
    }

    void move(Imm64 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadRotationBlindedConstant(rotationBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm64(), dest);
    }

    void and64(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and64(key.value1, dest);
            and64(key.value2, dest);
        } else
            and64(imm.asTrustedImm32(), dest);
    }

    Jump branchPtr(RelationalCondition cond, RegisterID left, ImmPtr right)
    {
        if (shouldBlind(right)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(right), scratchRegister);
            return branchPtr(cond, left, scratchRegister);
        }
        return branchPtr(cond, left, right.asTrustedImmPtr());
    }

    void storePtr(ImmPtr imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            storePtr(scratchRegister, dest);
        } else
            storePtr(imm.asTrustedImmPtr(), dest);
    }

    void store64(Imm64 imm, Address dest)
    {
        if (shouldBlind(imm)) {
            RegisterID scratchRegister = scratchRegisterForBlinding();
            loadRotationBlindedConstant(rotationBlindConstant(imm), scratchRegister);
            store64(scratchRegister, dest);
        } else
            store64(imm.asTrustedImm64(), dest);
    }

#endif

#if ENABLE(JIT_CONSTANT_BLINDING)
    bool shouldBlind(Imm32 imm)
    {
#if ENABLE(FORCED_JIT_BLINDING)
        UNUSED_PARAM(imm);
        // Debug always blind all constants, if only so we know
        // if we've broken blinding during patch development.
        return true;
#else

        // First off we'll special case common, "safe" values to avoid hurting
        // performance too much
        uint32_t value = imm.asTrustedImm32().m_value;
        switch (value) {
        case 0xffff:
        case 0xffffff:
        case 0xffffffff:
            return false;
        default:
            if (value <= 0xff)
                return false;
            if (~value <= 0xff)
                return false;
        }

        if (!shouldConsiderBlinding())
            return false;

        return shouldBlindForSpecificArch(value);
#endif
    }

    struct BlindedImm32 {
        BlindedImm32(int32_t v1, int32_t v2)
            : value1(v1)
            , value2(v2)
        {
        }
        TrustedImm32 value1;
        TrustedImm32 value2;
    };

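    // The random key is masked so that it spans no more bytes than the constant it is
    // hiding; for example, a value that fits in 8 bits is only ever combined with an
    // 8-bit key.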
    uint32_t keyForConstant(uint32_t value, uint32_t& mask)
    {
        uint32_t key = random();
        if (value <= 0xff)
            mask = 0xff;
        else if (value <= 0xffff)
            mask = 0xffff;
        else if (value <= 0xffffff)
            mask = 0xffffff;
        else
            mask = 0xffffffff;
        return key & mask;
    }

    uint32_t keyForConstant(uint32_t value)
    {
        uint32_t mask = 0;
        return keyForConstant(value, mask);
    }

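    // xorBlindConstant() splits a constant into (value ^ key, key); since
    // (value ^ key) ^ key == value, loadXorBlindedConstant() below rebuilds the original
    // with a move followed by an xor32, so the plain constant never appears as a single
    // immediate in the instruction stream.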
    BlindedImm32 xorBlindConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue);
        return BlindedImm32(baseValue ^ key, key);
    }

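    // The two halves returned here always sum back to the original value (mod 2^32), so
    // adding value1 and then value2 has the same net effect as adding the constant. The
    // mask table keeps the key 4-byte (resp. 2-byte) aligned whenever the original value
    // is, so blinded pointer offsets stay as aligned as unblinded ones.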
    BlindedImm32 additionBlindedConstant(Imm32 imm)
    {
        // The addition immediate may be used as a pointer offset. Keep aligned based on "imm".
        static uint32_t maskTable[4] = { 0xfffffffc, 0xffffffff, 0xfffffffe, 0xffffffff };

        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t key = keyForConstant(baseValue) & maskTable[baseValue & 3];
        if (key > baseValue)
            key = key - baseValue;
        return BlindedImm32(baseValue - key, key);
    }

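    // Bit by bit: where the key is 1, value1 carries the original bit and value2 is 1;
    // where the key is 0, value1 is 1 and value2 carries the original bit. Either way
    // value1 & value2 reproduces the original constant exactly.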
    BlindedImm32 andBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32(((baseValue & key) | ~key) & mask, ((baseValue & ~key) | key) & mask);
    }

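    // Here value1 | value2 == (baseValue & key) | (baseValue & ~key) == baseValue, so
    // or32-ing both halves is equivalent to or32-ing the original constant.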
    BlindedImm32 orBlindedConstant(Imm32 imm)
    {
        uint32_t baseValue = imm.asTrustedImm32().m_value;
        uint32_t mask = 0;
        uint32_t key = keyForConstant(baseValue, mask);
        ASSERT((baseValue & mask) == baseValue);
        return BlindedImm32((baseValue & key) & mask, (baseValue & ~key) & mask);
    }

    void loadXorBlindedConstant(BlindedImm32 constant, RegisterID dest)
    {
        move(constant.value1, dest);
        xor32(constant.value2, dest);
    }

    void add32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            add32(key.value1, dest);
            add32(key.value2, dest);
        } else
            add32(imm.asTrustedImm32(), dest);
    }

    void addPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            addPtr(key.value1, dest);
            addPtr(key.value2, dest);
        } else
            addPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            and32(key.value1, dest);
            and32(key.value2, dest);
        } else
            and32(imm.asTrustedImm32(), dest);
    }

    void andPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = andBlindedConstant(imm);
            andPtr(key.value1, dest);
            andPtr(key.value2, dest);
        } else
            andPtr(imm.asTrustedImm32(), dest);
    }

    void and32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return and32(imm.asTrustedImm32(), dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            and32(src, dest);
        } else
            and32(imm.asTrustedImm32(), src, dest);
    }

    void move(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm))
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
        else
            move(imm.asTrustedImm32(), dest);
    }

    void or32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            if (src == dest)
                return or32(imm, dest);
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            or32(src, dest);
        } else
            or32(imm.asTrustedImm32(), src, dest);
    }

    void or32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = orBlindedConstant(imm);
            or32(key.value1, dest);
            or32(key.value2, dest);
        } else
            or32(imm.asTrustedImm32(), dest);
    }

    void poke(Imm32 value, int index = 0)
    {
        store32(value, addressForPoke(index));
    }

    void poke(ImmPtr value, int index = 0)
    {
        storePtr(value, addressForPoke(index));
    }

#if CPU(X86_64)
    void poke(Imm64 value, int index = 0)
    {
        store64(value, addressForPoke(index));
    }
#endif

    void store32(Imm32 imm, Address dest)
    {
        if (shouldBlind(imm)) {
#if CPU(X86) || CPU(X86_64)
            BlindedImm32 blind = xorBlindConstant(imm);
            store32(blind.value1, dest);
            xor32(blind.value2, dest);
#else
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(imm), scratchRegister);
                store32(scratchRegister, dest);
            } else {
                // If we don't have a scratch register available for use, we'll just
                // place a random number of nops.
                uint32_t nopCount = random() & 3;
                while (nopCount--)
                    nop();
                store32(imm.asTrustedImm32(), dest);
            }
#endif
        } else
            store32(imm.asTrustedImm32(), dest);
    }

    void sub32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            sub32(key.value1, dest);
            sub32(key.value2, dest);
        } else
            sub32(imm.asTrustedImm32(), dest);
    }

    void subPtr(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 key = additionBlindedConstant(imm);
            subPtr(key.value1, dest);
            subPtr(key.value2, dest);
        } else
            subPtr(imm.asTrustedImm32(), dest);
    }

    void xor32(Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, src, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), src, dest);
    }

    void xor32(Imm32 imm, RegisterID dest)
    {
        if (shouldBlind(imm)) {
            BlindedImm32 blind = xorBlindConstant(imm);
            xor32(blind.value1, dest);
            xor32(blind.value2, dest);
        } else
            xor32(imm.asTrustedImm32(), dest);
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Imm32 right)
    {
        if (shouldBlind(right)) {
            if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                loadXorBlindedConstant(xorBlindConstant(right), scratchRegister);
                return branch32(cond, left, scratchRegister);
            }
            // If we don't have a scratch register available for use, we'll just
            // place a random number of nops.
            uint32_t nopCount = random() & 3;
            while (nopCount--)
                nop();
            return branch32(cond, left, right.asTrustedImm32());
        }

        return branch32(cond, left, right.asTrustedImm32());
    }

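    // In the blinded branchAdd32/branchMul32 below, when src == dest the blinded constant
    // cannot be materialised straight into dest without clobbering the source operand, so
    // the source is first copied to the platform scratch register (when one is available)
    // and the constant is built up in dest.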
    Jump branchAdd32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest)
    {
        if (src == dest)
            ASSERT(scratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchAdd32(cond, src, dest);
        }
        return branchAdd32(cond, src, imm.asTrustedImm32(), dest);
    }

    Jump branchMul32(ResultCondition cond, Imm32 imm, RegisterID src, RegisterID dest)
    {
        if (src == dest)
            ASSERT(scratchRegisterForBlinding());

        if (shouldBlind(imm)) {
            if (src == dest) {
                if (RegisterID scratchRegister = (RegisterID)scratchRegisterForBlinding()) {
                    move(src, scratchRegister);
                    src = scratchRegister;
                }
            }
            loadXorBlindedConstant(xorBlindConstant(imm), dest);
            return branchMul32(cond, src, dest);
        }
        return branchMul32(cond, imm.asTrustedImm32(), src, dest);
    }

    // branchSub32 takes a scratch register as 32 bit platforms make use of this,
    // with src == dst, and on x86-32 we don't have a platform scratch register.
    Jump branchSub32(ResultCondition cond, RegisterID src, Imm32 imm, RegisterID dest, RegisterID scratch)
    {
        if (shouldBlind(imm)) {
            ASSERT(scratch != dest);
            ASSERT(scratch != src);
            loadXorBlindedConstant(xorBlindConstant(imm), scratch);
            return branchSub32(cond, src, scratch, dest);
        }
        return branchSub32(cond, src, imm.asTrustedImm32(), dest);
    }

    // Immediate shifts only have 5 controllable bits
    // so we'll consider them safe for now.
    TrustedImm32 trustedImm32ForShift(Imm32 imm)
    {
        return TrustedImm32(imm.asTrustedImm32().m_value & 31);
    }

    void lshift32(Imm32 imm, RegisterID dest)
    {
        lshift32(trustedImm32ForShift(imm), dest);
    }

    void lshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        lshift32(src, trustedImm32ForShift(amount), dest);
    }

    void rshift32(Imm32 imm, RegisterID dest)
    {
        rshift32(trustedImm32ForShift(imm), dest);
    }

    void rshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        rshift32(src, trustedImm32ForShift(amount), dest);
    }

    void urshift32(Imm32 imm, RegisterID dest)
    {
        urshift32(trustedImm32ForShift(imm), dest);
    }

    void urshift32(RegisterID src, Imm32 amount, RegisterID dest)
    {
        urshift32(src, trustedImm32ForShift(amount), dest);
    }
#endif
};

#if CPU(ARM_THUMB2)
typedef MacroAssembler<MacroAssemblerARMv7> DefaultMacroAssembler;
#elif CPU(ARM64)
typedef MacroAssembler<MacroAssemblerARM64> DefaultMacroAssembler;
#elif CPU(MIPS)
typedef MacroAssembler<MacroAssemblerMIPS> DefaultMacroAssembler;
#elif CPU(X86)
typedef MacroAssembler<MacroAssemblerX86> DefaultMacroAssembler;
#elif CPU(X86_64)
typedef MacroAssembler<MacroAssemblerX86_64> DefaultMacroAssembler;
#endif

} // namespace JSC

#else // ENABLE(ASSEMBLER)

// If there is no assembler for this platform, at least allow code to make references to
// some of the things it would otherwise define, albeit without giving that code any way
// of doing anything useful.
class MacroAssembler {
private:
    MacroAssembler() { }

public:

    enum RegisterID { NoRegister };
    enum FPRegisterID { NoFPRegister };
};

#endif // ENABLE(ASSEMBLER)

#endif // MacroAssembler_h
