/*
 * Copyright (C) 2012, 2014, 2015 Apple Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef MacroAssemblerARM64_h
#define MacroAssemblerARM64_h

#if ENABLE(ASSEMBLER) && CPU(ARM64)

#include "ARM64Assembler.h"
#include "AbstractMacroAssembler.h"
#include <wtf/MathExtras.h>

namespace JSC {

class MacroAssemblerARM64 : public AbstractMacroAssembler<ARM64Assembler> {
#define COPIED_FROM_AbstractAssembler_H 1
#ifdef COPIED_FROM_AbstractAssembler_H
    typedef MacroAssemblerARM64 AbstractMacroAssemblerType;
    class CachedTempRegister {
        friend class DataLabelPtr;
        friend class DataLabel32;
        friend class DataLabelCompact;
        // template <typename> friend class Jump;
        friend class Label;

    public:
        CachedTempRegister(AbstractMacroAssemblerType* masm, RegisterID registerID)
            : m_masm(masm)
            , m_registerID(registerID)
            , m_value(0)
            , m_validBit(1 << static_cast<unsigned>(registerID))
        {
            ASSERT(static_cast<unsigned>(registerID) < (sizeof(unsigned) * 8));
        }

        ALWAYS_INLINE RegisterID registerIDInvalidate() { invalidate(); return m_registerID; }

        ALWAYS_INLINE RegisterID registerIDNoInvalidate() { return m_registerID; }

        bool value(intptr_t& value)
        {
            value = m_value;
            return m_masm->isTempRegisterValid(m_validBit);
        }

        void setValue(intptr_t value)
        {
            m_value = value;
            m_masm->setTempRegisterValid(m_validBit);
        }

        ALWAYS_INLINE void invalidate() { m_masm->clearTempRegisterValid(m_validBit); }

    private:
        AbstractMacroAssemblerType* m_masm;
        RegisterID m_registerID;
        intptr_t m_value;
        unsigned m_validBit;
    };
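
    // A CachedTempRegister tracks whether the value last materialized into a
    // temp register is still current, using one valid bit per register. A
    // minimal usage sketch (mirroring the moveToCachedReg-style callers later
    // in this class; 'wanted' is an illustrative name):
    //
    //     intptr_t cachedValue;
    //     if (m_cachedMemoryTempRegister.value(cachedValue) && cachedValue == wanted) {
    //         // Reuse memoryTempRegister without re-materializing 'wanted'.
    //     } else {
    //         // Emit moves to load 'wanted', then:
    //         m_cachedMemoryTempRegister.setValue(wanted);
    //     }
    //
    // Anything that clobbers a temp register must call invalidate() (or
    // registerIDInvalidate()) so a stale value is never reused.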

    ALWAYS_INLINE void invalidateAllTempRegisters()
    {
        m_tempRegistersValidBits = 0;
    }

    ALWAYS_INLINE bool isTempRegisterValid(unsigned registerMask)
    {
        return (m_tempRegistersValidBits & registerMask);
    }

    ALWAYS_INLINE void clearTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits &= ~registerMask;
    }

    ALWAYS_INLINE void setTempRegisterValid(unsigned registerMask)
    {
        m_tempRegistersValidBits |= registerMask;
    }

    friend class AllowMacroScratchRegisterUsage;
    friend class DisallowMacroScratchRegisterUsage;
    unsigned m_tempRegistersValidBits;
#endif // COPIED_FROM_AbstractAssembler_H
public:
    static const RegisterID dataTempRegister = ARM64Registers::ip0;
    static const RegisterID memoryTempRegister = ARM64Registers::ip1;

#if 0
    RegisterID scratchRegister()
    {
        RELEASE_ASSERT(m_allowScratchRegister);
        return getCachedDataTempRegisterIDAndInvalidate();
    }
#endif

private:
    static const ARM64Registers::FPRegisterID fpTempRegister = ARM64Registers::q31;
    static const ARM64Assembler::SetFlags S = ARM64Assembler::S;
    static const int64_t maskHalfWord0 = 0xffffl;
    static const int64_t maskHalfWord1 = 0xffff0000l;
    static const int64_t maskUpperWord = 0xffffffff00000000l;

    // 4 instructions - 3 to load the function pointer, plus a blr.
    static const ptrdiff_t REPATCH_OFFSET_CALL_TO_POINTER = -16;
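
    // A sketch of the patchable call sequence the offset above assumes: the
    // callee pointer is materialized with a movz/movk/movk triple (a 48-bit
    // address) and then called indirectly, e.g.
    //
    //     movz x16, #addr0_15
    //     movk x16, #addr16_31, lsl #16
    //     movk x16, #addr32_47, lsl #32
    //     blr  x16
    //
    // Four 4-byte instructions, so the pointer load begins 16 bytes before
    // the end of the call - hence the -16.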

public:
    static const int PointerSize = 8;

    MacroAssemblerARM64()
        : m_dataMemoryTempRegister(this, dataTempRegister)
        , m_cachedMemoryTempRegister(this, memoryTempRegister)
        , m_makeJumpPatchable(false)
    {
    }

    typedef ARM64Assembler::LinkRecord LinkRecord;
    typedef ARM64Assembler::JumpType JumpType;
    typedef ARM64Assembler::JumpLinkType JumpLinkType;
    typedef ARM64Assembler::Condition Condition;

    static const ARM64Assembler::Condition DefaultCondition = ARM64Assembler::ConditionInvalid;
    static const ARM64Assembler::JumpType DefaultJump = ARM64Assembler::JumpNoConditionFixedSize;

    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    static bool canCompact(JumpType jumpType) { return ARM64Assembler::canCompact(jumpType); }
    static JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(jumpType, from, to); }
    static JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return ARM64Assembler::computeJumpType(record, from, to); }
    static int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return ARM64Assembler::jumpSizeDelta(jumpType, jumpLinkType); }
    static void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return ARM64Assembler::link(record, from, to); }

    static const Scale ScalePtr = TimesEight;

    static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value)
    {
        // This is the largest 32-bit access allowed, aligned to 64-bit boundary.
        return !(value & ~0x3ff8);
    }
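
    // The mask accepts exactly the multiples of 8 in [0, 0x3ff8]. Worked
    // against the expression above:
    //
    //     isCompactPtrAlignedAddressOffset(16);     // true:  16 & ~0x3ff8 == 0
    //     isCompactPtrAlignedAddressOffset(12);     // false: not 8-byte aligned
    //     isCompactPtrAlignedAddressOffset(0x4000); // false: larger than 0x3ff8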

    enum RelationalCondition {
        Equal = ARM64Assembler::ConditionEQ,
        NotEqual = ARM64Assembler::ConditionNE,
        Above = ARM64Assembler::ConditionHI,
        AboveOrEqual = ARM64Assembler::ConditionHS,
        Below = ARM64Assembler::ConditionLO,
        BelowOrEqual = ARM64Assembler::ConditionLS,
        GreaterThan = ARM64Assembler::ConditionGT,
        GreaterThanOrEqual = ARM64Assembler::ConditionGE,
        LessThan = ARM64Assembler::ConditionLT,
        LessThanOrEqual = ARM64Assembler::ConditionLE
    };

    enum ResultCondition {
        Overflow = ARM64Assembler::ConditionVS,
        Signed = ARM64Assembler::ConditionMI,
        PositiveOrZero = ARM64Assembler::ConditionPL,
        Zero = ARM64Assembler::ConditionEQ,
        NonZero = ARM64Assembler::ConditionNE
    };

    enum ZeroCondition {
        IsZero = ARM64Assembler::ConditionEQ,
        IsNonZero = ARM64Assembler::ConditionNE
    };

    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARM64Assembler::ConditionEQ,
        DoubleNotEqual = ARM64Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARM64Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARM64Assembler::ConditionGE,
        DoubleLessThan = ARM64Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARM64Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARM64Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARM64Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARM64Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARM64Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARM64Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARM64Assembler::ConditionLE,
    };
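
    // After fcmp, an unordered (NaN) comparison sets the V flag. That is what
    // the "not the right flag" notes above mean: NE alone would also fire for
    // NaN operands, and VS alone ignores equality. Helpers such as
    // jumpAfterFloatingPointCompare and moveConditionallyAfterFloatingPointCompare
    // (used below) special-case DoubleNotEqual and DoubleEqualOrUnordered with
    // an extra VS-guarded branch or csel to get the intended semantics.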

    static const RegisterID stackPointerRegister = ARM64Registers::sp;
    static const RegisterID framePointerRegister = ARM64Registers::fp;
    static const RegisterID linkRegister = ARM64Registers::lr;

    // FIXME: Get reasonable implementations for these
    static bool shouldBlindForSpecificArch(uint32_t value) { return value >= 0x00ffffff; }
    static bool shouldBlindForSpecificArch(uint64_t value) { return value >= 0x00ffffff; }

    // Integer operations:

    void add32(RegisterID a, RegisterID b, RegisterID dest)
    {
        ASSERT(a != ARM64Registers::sp && b != ARM64Registers::sp);
        m_assembler.add<32>(dest, a, b);
    }

    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add<32>(dest, dest, src);
    }

    void add32(TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            return;

        add32(imm, dest, dest);
    }

    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (!imm.m_value) {
            move(src, dest);
            return;
        }

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dest, src, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dest, src, UInt12(-imm.m_value));
        else {
            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dest, src, dataTempRegister);
        }
    }
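
    // This is the recurring immediate-dispatch pattern in this file: ARM64
    // add/sub take only a 12-bit unsigned immediate, so, as a sketch of the
    // three cases above (register names illustrative; dataTempRegister is
    // ip0/x16):
    //
    //     add32(TrustedImm32(42), x0, x1);     // add w1, w0, #42
    //     add32(TrustedImm32(-42), x0, x1);    // sub w1, w0, #42
    //     add32(TrustedImm32(123456), x0, x1); // materialize in ip0, then
    //                                          // add w1, w0, w16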

    void add32(TrustedImm32 imm, Address address)
    {
        if (!imm.m_value)
            return;

        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        if (!imm.m_value)
            return;

        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void add32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        add32(dataTempRegister, dest);
    }

    void add64(RegisterID a, RegisterID b, RegisterID dest)
    {
        ASSERT(a != ARM64Registers::sp || b != ARM64Registers::sp);
        if (b == ARM64Registers::sp)
            std::swap(a, b);
        m_assembler.add<64>(dest, a, b);
    }

    void add64(RegisterID src, RegisterID dest)
    {
        if (src == ARM64Registers::sp)
            m_assembler.add<64>(dest, src, dest);
        else
            m_assembler.add<64>(dest, dest, src);
    }

    void add64(TrustedImm32 imm, RegisterID dest)
    {
        if (!imm.m_value)
            return;

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;
        if (!immediate)
            return;

        if (isUInt12(immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (!imm.m_value) {
            move(src, dest);
            return;
        }

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dest, src, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dest, src, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, src, dataTempRegister);
    }

    void add64(TrustedImm32 imm, Address address)
    {
        if (!imm.m_value)
            return;

        load64(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store64(dataTempRegister, address);
    }

    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        if (!imm.m_value)
            return;

        load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.add<64>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.sub<64>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store64(dataTempRegister, address.m_ptr);
            return;
        }

        signExtend32ToPtr(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store64(dataTempRegister, address.m_ptr);
    }

    void addPtrNoFlags(TrustedImm32 imm, RegisterID srcDest)
    {
        add64(imm, srcDest);
    }

    void add64(Address src, RegisterID dest)
    {
        load64(src, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void add64(AbsoluteAddress src, RegisterID dest)
    {
        load64(src.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.add<64>(dest, dest, dataTempRegister);
    }

    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.and_<32>(dest, op1, op2);
    }

    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.and_<32>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<32>(dest, src, dataTempRegister);
    }

    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }

    void and64(RegisterID src, RegisterID dest)
    {
        m_assembler.and_<64>(dest, dest, src);
    }

    void and64(TrustedImm32 imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }

    void and64(TrustedImmPtr imm, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(reinterpret_cast<uint64_t>(imm.m_value));

        if (logicalImm.isValid()) {
            m_assembler.and_<64>(dest, dest, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.and_<64>(dest, dest, dataTempRegister);
    }
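
    // LogicalImmediate::create32/create64 succeed only for ARM64 bitmask
    // immediates (a rotated, contiguous run of ones, optionally repeated
    // across the register). An illustrative sketch of the two paths above:
    //
    //     and64(TrustedImm32(0xff00), x0);     // encodable: single and instruction
    //     and64(TrustedImm32(0x12345678), x0); // not encodable: materialize in
    //                                          // ip0, then and with the register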

    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<32>(dest, src);
    }

    void countLeadingZeros64(RegisterID src, RegisterID dest)
    {
        m_assembler.clz<64>(dest, src);
    }

    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, shiftAmount);
    }

    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<32>(dest, src, imm.m_value & 0x1f);
    }

    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }

    void lshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, shiftAmount);
    }

    void lshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl<64>(dest, src, imm.m_value & 0x3f);
    }

    void lshift64(RegisterID shiftAmount, RegisterID dest)
    {
        lshift64(dest, shiftAmount, dest);
    }

    void lshift64(TrustedImm32 imm, RegisterID dest)
    {
        lshift64(dest, imm, dest);
    }

    void mul32(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<32>(dest, left, right);
    }

    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<32>(dest, dest, src);
    }

    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.mul<32>(dest, src, dataTempRegister);
    }

    void mul32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        mul32(dataTempRegister, dest);
    }

    void mul64(RegisterID src, RegisterID dest)
    {
        m_assembler.mul<64>(dest, dest, src);
    }

    void mul64(RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.mul<64>(dest, left, right);
    }

    void div32(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<32>(dest, dividend, divisor);
    }

    void div64(RegisterID dividend, RegisterID divisor, RegisterID dest)
    {
        m_assembler.sdiv<64>(dest, dividend, divisor);
    }

    void neg32(RegisterID dest)
    {
        m_assembler.neg<32>(dest, dest);
    }

    void neg64(RegisterID dest)
    {
        m_assembler.neg<64>(dest, dest);
    }

    void or32(RegisterID src, RegisterID dest)
    {
        or32(dest, src, dest);
    }

    void or32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        or32(dataTempRegister, dest);
    }

    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<32>(dest, op1, op2);
    }

    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

        if (logicalImm.isValid()) {
            m_assembler.orr<32>(dest, src, logicalImm);
            return;
        }

        ASSERT(src != dataTempRegister);
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dest, src, dataTempRegister);
    }

    void or32(RegisterID src, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<32>(dataTempRegister, dataTempRegister, src);
        store32(dataTempRegister, address.m_ptr);
    }

    void or32(TrustedImm32 imm, AbsoluteAddress address)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);
        if (logicalImm.isValid()) {
            load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.orr<32>(dataTempRegister, dataTempRegister, logicalImm);
            store32(dataTempRegister, address.m_ptr);
        } else {
            load32(address.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
            or32(imm, memoryTempRegister, getCachedDataTempRegisterIDAndInvalidate());
            store32(dataTempRegister, address.m_ptr);
        }
    }

    void or32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        or32(imm, dataTempRegister, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void or64(RegisterID src, RegisterID dest)
    {
        or64(dest, src, dest);
    }

    void or64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr<64>(dest, op1, op2);
    }

    void or64(TrustedImm32 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void or64(TrustedImm64 imm, RegisterID dest)
    {
        or64(imm, dest, dest);
    }

    void or64(TrustedImm64 imm, RegisterID src, RegisterID dest)
    {
        LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

        if (logicalImm.isValid()) {
            m_assembler.orr<64>(dest, src, logicalImm);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.orr<64>(dest, src, dataTempRegister);
    }

    void rotateRight64(TrustedImm32 imm, RegisterID srcDst)
    {
        m_assembler.ror<64>(srcDst, srcDst, imm.m_value & 63);
    }

    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, shiftAmount);
    }

    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<32>(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    void rshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, shiftAmount);
    }

    void rshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr<64>(dest, src, imm.m_value & 0x3f);
    }

    void rshift64(RegisterID shiftAmount, RegisterID dest)
    {
        rshift64(dest, shiftAmount, dest);
    }

    void rshift64(TrustedImm32 imm, RegisterID dest)
    {
        rshift64(dest, imm, dest);
    }

    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<32>(dest, dest, src);
    }

    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dest, dest, dataTempRegister);
    }

    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value))
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
        else if (isUInt12(-imm.m_value))
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
        else {
            move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        }

        store32(dataTempRegister, address);
    }

    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());

        if (isUInt12(imm.m_value)) {
            m_assembler.sub<32>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        if (isUInt12(-imm.m_value)) {
            m_assembler.add<32>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
            store32(dataTempRegister, address.m_ptr);
            return;
        }

        move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.sub<32>(dataTempRegister, dataTempRegister, memoryTempRegister);
        store32(dataTempRegister, address.m_ptr);
    }

    void sub32(Address src, RegisterID dest)
    {
        load32(src, getCachedDataTempRegisterIDAndInvalidate());
        sub32(dataTempRegister, dest);
    }

    void sub64(RegisterID src, RegisterID dest)
    {
        m_assembler.sub<64>(dest, dest, src);
    }

    void sub64(TrustedImm32 imm, RegisterID dest)
    {
        if (isUInt12(imm.m_value)) {
            m_assembler.sub<64>(dest, dest, UInt12(imm.m_value));
            return;
        }
        if (isUInt12(-imm.m_value)) {
            m_assembler.add<64>(dest, dest, UInt12(-imm.m_value));
            return;
        }

        signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void sub64(TrustedImm64 imm, RegisterID dest)
    {
        intptr_t immediate = imm.m_value;

        if (isUInt12(immediate)) {
            m_assembler.sub<64>(dest, dest, UInt12(static_cast<int32_t>(immediate)));
            return;
        }
        if (isUInt12(-immediate)) {
            m_assembler.add<64>(dest, dest, UInt12(static_cast<int32_t>(-immediate)));
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.sub<64>(dest, dest, dataTempRegister);
    }

    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, shiftAmount);
    }

    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<32>(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }

    void urshift64(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, shiftAmount);
    }

    void urshift64(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr<64>(dest, src, imm.m_value & 0x3f);
    }

    void urshift64(RegisterID shiftAmount, RegisterID dest)
    {
        urshift64(dest, shiftAmount, dest);
    }

    void urshift64(TrustedImm32 imm, RegisterID dest)
    {
        urshift64(dest, imm, dest);
    }

    void xor32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        xor32(dataTempRegister, dest);
    }

    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<32>(dest, op1, op2);
    }

    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        xor32(imm, dest, dest);
    }

    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<32>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create32(imm.m_value);

            if (logicalImm.isValid()) {
                m_assembler.eor<32>(dest, src, logicalImm);
                return;
            }

            move(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<32>(dest, src, dataTempRegister);
        }
    }

    void xor64(RegisterID src, Address address)
    {
        load64(address, getCachedDataTempRegisterIDAndInvalidate());
        m_assembler.eor<64>(dataTempRegister, dataTempRegister, src);
        store64(dataTempRegister, address);
    }

    void xor64(RegisterID src, RegisterID dest)
    {
        xor64(dest, src, dest);
    }

    void xor64(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor<64>(dest, op1, op2);
    }

    void xor64(TrustedImm32 imm, RegisterID dest)
    {
        xor64(imm, dest, dest);
    }

    void xor64(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn<64>(dest, src);
        else {
            LogicalImmediate logicalImm = LogicalImmediate::create64(static_cast<intptr_t>(static_cast<int64_t>(imm.m_value)));

            if (logicalImm.isValid()) {
                m_assembler.eor<64>(dest, src, logicalImm);
                return;
            }

            signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
            m_assembler.eor<64>(dest, src, dataTempRegister);
        }
    }

    void not32(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<32>(dest, src);
    }

    void not64(RegisterID src, RegisterID dest)
    {
        m_assembler.mvn<64>(dest, src);
    }

    // Memory access operations:

    void load64(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void load64(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }
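
    // The BaseIndex loads and stores in this file share one shape: with no
    // offset and a scale of either 0 or the access width (3 for 64-bit), a
    // single scaled-index access suffices, roughly
    //
    //     ldr x0, [x1, x2, lsl #3]
    //
    // Otherwise the offset is sign-extended into memoryTempRegister, the
    // scaled index is added to it, and the access goes through [base, temp].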

    void load64(const void* address, RegisterID dest)
    {
        load<64>(address, dest);
    }

    DataLabel32 load64WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load64WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return label;
    }

    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(!(address.offset & ~0xff8));
        m_assembler.ldr<64>(dest, address.base, address.offset);
        return result;
    }

    void load32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void load32(const void* address, RegisterID dest)
    {
        load<32>(address, dest);
    }

    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));
        DataLabelCompact label(this);
        m_assembler.ldr<32>(dest, address.base, address.offset);
        return label;
    }

    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(address, dest);
    }

    void load16(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrh(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrh(dest, address.base, memoryTempRegister);
    }

    void load16(ExtendedAddress address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), m_cachedMemoryTempRegister);
        m_assembler.ldrh(dest, memoryTempRegister, address.base, ARM64Assembler::UXTX, 1);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
    }

    void load16Unaligned(ImplicitAddress address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(address, dest);
    }

    void load16SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    void load16SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.ldrsh<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsh<32>(dest, address.base, memoryTempRegister);
    }

    void zeroExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxth<64>(dest, src);
    }

    void signExtend16To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxth<64>(dest, src);
    }

    void load8(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrb(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrb(dest, address.base, memoryTempRegister);
    }

    void load8(const void* address, RegisterID dest)
    {
        moveToCachedReg(TrustedImmPtr(address), m_cachedMemoryTempRegister);
        m_assembler.ldrb(dest, memoryTempRegister, ARM64Registers::zr);
        if (dest == memoryTempRegister)
            m_cachedMemoryTempRegister.invalidate();
    }

    void load8SignedExtendTo32(ImplicitAddress address, RegisterID dest)
    {
        if (tryLoadSignedWithOffset<8>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    void load8SignedExtendTo32(BaseIndex address, RegisterID dest)
    {
        if (!address.offset && !address.scale) {
            m_assembler.ldrsb<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldrsb<32>(dest, address.base, memoryTempRegister);
    }

    void zeroExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.uxtb<64>(dest, src);
    }

    void signExtend8To32(RegisterID src, RegisterID dest)
    {
        m_assembler.sxtb<64>(dest, src);
    }

    void store64(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<64>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<64>(src, address.base, memoryTempRegister);
    }

    void store64(RegisterID src, const void* address)
    {
        store<64>(src, address);
    }

    void store64(TrustedImm32 imm, ImplicitAddress address)
    {
        store64(TrustedImm64(imm.m_value), address);
    }

    void store64(TrustedImm64 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    void store64(TrustedImm64 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store64(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store64(dataTempRegister, address);
    }

    DataLabel32 store64WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<64>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest)
    {
        storePair64(src1, src2, dest, TrustedImm32(0));
    }

    void storePair64(RegisterID src1, RegisterID src2, RegisterID dest, TrustedImm32 offset)
    {
        m_assembler.stp<64>(src1, src2, dest, offset.m_value);
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<32>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.str<32>(src, address.base, memoryTempRegister);
    }

    void store32(RegisterID src, const void* address)
    {
        store<32>(src, address);
    }

    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    void store32(TrustedImm32 imm, const void* address)
    {
        if (!imm.m_value) {
            store32(ARM64Registers::zr, address);
            return;
        }

        moveToCachedReg(imm, m_dataMemoryTempRegister);
        store32(dataTempRegister, address);
    }

    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label(this);
        signExtend32ToPtrWithFixedWidth(address.offset, getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<32>(src, address.base, memoryTempRegister, ARM64Assembler::SXTW, 0);
        return label;
    }

    void store16(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<16>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<16>(src, address.base, memoryTempRegister);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        if (!address.offset && (!address.scale || address.scale == 1)) {
            m_assembler.strh(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strh(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        if (!address.offset && !address.scale) {
            m_assembler.strb(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.strb(src, address.base, memoryTempRegister);
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.strb(src, memoryTempRegister, 0);
    }

    void store8(RegisterID src, ImplicitAddress address)
    {
        if (tryStoreWithOffset<8>(src, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.str<8>(src, address.base, memoryTempRegister);
    }

    void store8(TrustedImm32 imm, void* address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    void store8(TrustedImm32 imm, ImplicitAddress address)
    {
        if (!imm.m_value) {
            store8(ARM64Registers::zr, address);
            return;
        }

        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        store8(dataTempRegister, address);
    }

    void getEffectiveAddress(BaseIndex address, RegisterID dest)
    {
        m_assembler.add<64>(dest, address.base, address.index, ARM64Assembler::LSL, address.scale);
        if (address.offset)
            add64(TrustedImm32(address.offset), dest);
    }


    // Floating-point operations:

    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
    static bool supportsFloatingPointCeil() { return true; }

    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };

    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<64>(dest, src);
    }

    void absFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fabs<32>(dest, src);
    }

    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        addDouble(dest, src, dest);
    }

    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<64>(dest, op1, op2);
    }

    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(TrustedImmPtr(address.m_ptr), fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    void addFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fadd<32>(dest, op1, op2);
    }

    void ceilDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<64>(dest, src);
    }

    void ceilFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintp<32>(dest, src);
    }

    void floorDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.frintm<64>(dest, src);
    }

    // Convert 'src' to an integer, and place the resulting value in 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID, bool negZeroCheck = true)
    {
        m_assembler.fcvtns<32, 64>(dest, src);

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.scvtf<64, 32>(fpTempRegister, dest);
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // Test for negative zero.
        if (negZeroCheck) {
            Jump valueIsNonZero = branchTest32(NonZero, dest);
            RegisterID scratch = getCachedMemoryTempRegisterIDAndInvalidate();
            m_assembler.fmov<64>(scratch, src);
            failureCases.append(makeTestBitAndBranch(scratch, 63, IsNonZero));
            valueIsNonZero.link(this);
        }
    }
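
    // Worked example of the negative-zero check above: fcvtns(-0.0) yields
    // the integer 0, scvtf turns that back into +0.0, and fcmp treats
    // +0.0 == -0.0 as equal, so the round-trip test alone cannot reject -0.0.
    // fmov copies the raw double bits into a GPR; -0.0 is 0x8000000000000000,
    // so testing bit 63 (the sign bit) while the integer result is zero
    // catches exactly this case.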

    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<64>(left, right);
        return jumpAfterFloatingPointCompare(cond);
    }

    Jump branchFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.fcmp<32>(left, right);
        return jumpAfterFloatingPointCompare(cond);
    }

    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump result = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.fcmp_0<64>(reg);
        Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }

    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit integer in dataTempRegister, copy the low 32 bits to dest.
        m_assembler.fcvtzs<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend back to the full 64-bit value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }

    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Truncate to a 64-bit unsigned integer in dataTempRegister, copy the low 32 bits to dest.
        m_assembler.fcvtzu<64, 64>(getCachedDataTempRegisterIDAndInvalidate(), src);
        zeroExtend32ToPtr(dataTempRegister, dest);
        // Check that the low 32 bits sign-extend back to the full 64-bit value.
        m_assembler.cmp<64>(dataTempRegister, dataTempRegister, ARM64Assembler::SXTW, 0);
        return Jump(makeBranch(branchType == BranchIfTruncateSuccessful ? Equal : NotEqual));
    }
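
    // Worked example of the range check shared by both truncates (shown for
    // the signed case): for input 2^31, fcvtzs<64> produces 0x0000000080000000;
    // its low 32 bits sign-extend to 0xffffffff80000000, which differs from
    // the 64-bit result, so the cmp sets NE and the failure branch is taken.
    // For an in-range input such as -1 (0xffffffffffffffff), the sign-extended
    // low half equals the full value and the comparison sets EQ.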

    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<32, 64>(dest, src);
    }

    void convertFloatToDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fcvt<64, 32>(dest, src);
    }

    void convertInt32ToDouble(TrustedImm32 imm, FPRegisterID dest)
    {
        move(imm, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID /*scratch*/)
    {
        m_assembler.ucvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 32>(dest, src);
    }

    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        load32(address, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
        convertInt32ToDouble(dataTempRegister, dest);
    }

    void convertInt64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.scvtf<64, 64>(dest, src);
    }

    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        divDouble(dest, src, dest);
    }

    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<64>(dest, op1, op2);
    }

    void divFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.fdiv<32>(dest, op1, op2);
    }

    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<64>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 3)) {
            m_assembler.ldr<64>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<64>(dest, address.base, memoryTempRegister);
    }

    void loadDouble(TrustedImmPtr address, FPRegisterID dest)
    {
        moveToCachedReg(address, m_cachedMemoryTempRegister);
        m_assembler.ldr<64>(dest, memoryTempRegister, ARM64Registers::zr);
    }

    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        if (tryLoadWithOffset<32>(dest, address.base, address.offset))
            return;

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        if (!address.offset && (!address.scale || address.scale == 2)) {
            m_assembler.ldr<32>(dest, address.base, address.index, ARM64Assembler::UXTX, address.scale);
            return;
        }

        signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
        m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
        m_assembler.ldr<32>(dest, address.base, memoryTempRegister);
    }

    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveZeroToDouble(FPRegisterID reg)
    {
        m_assembler.fmov<64>(reg, ARM64Registers::zr);
    }

    void moveDoubleTo64(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void moveFloatTo32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.fmov<32>(dest, src);
    }

    void move64ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<64>(dest, src);
    }

    void move32ToFloat(RegisterID src, FPRegisterID dest)
    {
        m_assembler.fmov<32>(dest, src);
    }

    void moveConditionallyDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.fcmp<64>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
    }

    void moveConditionallyFloat(DoubleCondition cond, FPRegisterID left, FPRegisterID right, RegisterID src, RegisterID dest)
    {
        m_assembler.fcmp<32>(left, right);
        moveConditionallyAfterFloatingPointCompare<64>(cond, src, dest);
    }

    template<int datasize>
    void moveConditionallyAfterFloatingPointCompare(DoubleCondition cond, RegisterID src, RegisterID dest)
    {
        if (cond == DoubleNotEqual) {
            Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionNE);
            unordered.link(this);
            return;
        }
        if (cond == DoubleEqualOrUnordered) {
            // If the compare is unordered, src is copied to dest and the
            // next csel has all arguments equal to src.
            // If the compare is ordered, dest is unchanged and EQ decides
            // what value to set.
            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionVS);
            m_assembler.csel<datasize>(dest, src, dest, ARM64Assembler::ConditionEQ);
            return;
        }
        m_assembler.csel<datasize>(dest, src, dest, ARM64Condition(cond));
    }
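
    // For instance, moveConditionallyDouble(DoubleEqualOrUnordered, ...) ends
    // up behaving as the following sketch:
    //
    //     if (unordered)          dest = src; // csel on VS
    //     else if (left == right) dest = src; // csel on EQ
    //     // otherwise dest is left unchanged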
1732
1733 void mulDouble(FPRegisterID src, FPRegisterID dest)
1734 {
1735 mulDouble(dest, src, dest);
1736 }
1737
1738 void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1739 {
1740 m_assembler.fmul<64>(dest, op1, op2);
1741 }
1742
1743 void mulDouble(Address src, FPRegisterID dest)
1744 {
1745 loadDouble(src, fpTempRegister);
1746 mulDouble(fpTempRegister, dest);
1747 }
1748
1749 void mulFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1750 {
1751 m_assembler.fmul<32>(dest, op1, op2);
1752 }
1753
1754 void andDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1755 {
1756 m_assembler.vand<64>(dest, op1, op2);
1757 }
1758
1759 void andFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1760 {
1761 andDouble(op1, op2, dest);
1762 }
1763
1764 void negateDouble(FPRegisterID src, FPRegisterID dest)
1765 {
1766 m_assembler.fneg<64>(dest, src);
1767 }
1768
1769 void sqrtDouble(FPRegisterID src, FPRegisterID dest)
1770 {
1771 m_assembler.fsqrt<64>(dest, src);
1772 }
1773
1774 void sqrtFloat(FPRegisterID src, FPRegisterID dest)
1775 {
1776 m_assembler.fsqrt<32>(dest, src);
1777 }
1778
1779 void storeDouble(FPRegisterID src, ImplicitAddress address)
1780 {
1781 if (tryStoreWithOffset<64>(src, address.base, address.offset))
1782 return;
1783
1784 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1785 m_assembler.str<64>(src, address.base, memoryTempRegister);
1786 }
1787
1788 void storeDouble(FPRegisterID src, TrustedImmPtr address)
1789 {
1790 moveToCachedReg(address, m_cachedMemoryTempRegister);
1791 m_assembler.str<64>(src, memoryTempRegister, ARM64Registers::zr);
1792 }
1793
1794 void storeDouble(FPRegisterID src, BaseIndex address)
1795 {
1796 if (!address.offset && (!address.scale || address.scale == 3)) {
1797 m_assembler.str<64>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1798 return;
1799 }
1800
1801 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1802 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1803 m_assembler.str<64>(src, address.base, memoryTempRegister);
1804 }
1805
1806 void storeFloat(FPRegisterID src, ImplicitAddress address)
1807 {
1808 if (tryStoreWithOffset<32>(src, address.base, address.offset))
1809 return;
1810
1811 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1812 m_assembler.str<32>(src, address.base, memoryTempRegister);
1813 }
1814
1815 void storeFloat(FPRegisterID src, BaseIndex address)
1816 {
1817 if (!address.offset && (!address.scale || address.scale == 2)) {
1818 m_assembler.str<32>(src, address.base, address.index, ARM64Assembler::UXTX, address.scale);
1819 return;
1820 }
1821
1822 signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
1823 m_assembler.add<64>(memoryTempRegister, memoryTempRegister, address.index, ARM64Assembler::UXTX, address.scale);
1824 m_assembler.str<32>(src, address.base, memoryTempRegister);
1825 }
1826
1827 void subDouble(FPRegisterID src, FPRegisterID dest)
1828 {
1829 subDouble(dest, src, dest);
1830 }
1831
1832 void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1833 {
1834 m_assembler.fsub<64>(dest, op1, op2);
1835 }
1836
1837 void subDouble(Address src, FPRegisterID dest)
1838 {
1839 loadDouble(src, fpTempRegister);
1840 subDouble(fpTempRegister, dest);
1841 }
1842
1843 void subFloat(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
1844 {
1845 m_assembler.fsub<32>(dest, op1, op2);
1846 }
1847
1848 // Result is undefined if the value is outside of the integer range.
1849 void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
1850 {
1851 m_assembler.fcvtzs<32, 64>(dest, src);
1852 }
1853
1854 void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
1855 {
1856 m_assembler.fcvtzu<32, 64>(dest, src);
1857 }
1858
1859
1860 // Stack manipulation operations:
1861 //
1862 // The ABI is assumed to provide a stack abstraction to memory,
1863 // containing machine word sized units of data. Push and pop
1864 // operations add and remove a single register sized unit of data
1865 // to or from the stack. These operations are not supported on
1866 // ARM64, where SP must remain 16-byte aligned. Peek and poke operations read or write values on the
1867 // stack, without moving the current stack position. Additionally,
1868 // there are popToRestore and pushToSave operations, which are
1869 // designed just for quick-and-dirty saving and restoring of
1870 // temporary values. These operations don't claim to have any
1871 // ABI compatibility.
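//
// A minimal usage sketch (illustrative only, assuming 'masm' is a
// MacroAssemblerARM64 and 'regA'/'regB' are allocated GPRs):
//
//     masm.pushPair(regA, regB); // SP -= 16; both registers saved
//     // ... code that clobbers regA and regB ...
//     masm.popPair(regA, regB); // both restored; SP += 16
//
// Each pushToSave/popToRestore likewise moves SP by pushToSaveByteOffset()
// (16 bytes) to preserve the 16-byte stack alignment ARM64 requires.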
1872
1873 void pop(RegisterID) NO_RETURN_DUE_TO_CRASH
1874 {
1875 CRASH();
1876 }
1877
1878 void push(RegisterID) NO_RETURN_DUE_TO_CRASH
1879 {
1880 CRASH();
1881 }
1882
1883 void push(Address) NO_RETURN_DUE_TO_CRASH
1884 {
1885 CRASH();
1886 }
1887
1888 void push(TrustedImm32) NO_RETURN_DUE_TO_CRASH
1889 {
1890 CRASH();
1891 }
1892
1893 void popPair(RegisterID dest1, RegisterID dest2)
1894 {
1895 m_assembler.ldp<64>(dest1, dest2, ARM64Registers::sp, PairPostIndex(16));
1896 }
1897
1898 void pushPair(RegisterID src1, RegisterID src2)
1899 {
1900 m_assembler.stp<64>(src1, src2, ARM64Registers::sp, PairPreIndex(-16));
1901 }
1902
1903 void popToRestore(RegisterID dest)
1904 {
1905 m_assembler.ldr<64>(dest, ARM64Registers::sp, PostIndex(16));
1906 }
1907
1908 void pushToSave(RegisterID src)
1909 {
1910 m_assembler.str<64>(src, ARM64Registers::sp, PreIndex(-16));
1911 }
1912
1913 void pushToSaveImmediateWithoutTouchingRegisters(TrustedImm32 imm)
1914 {
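// The trick: push dataTempRegister twice (claiming 16 bytes), materialize
// the immediate into it and store that over the bottom slot, then reload
// the register's original value from the top slot. Net effect: SP -= 16,
// the immediate sits at [SP], and no register value is disturbed.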
1915 RegisterID reg = dataTempRegister;
1916 pushPair(reg, reg);
1917 move(imm, reg);
1918 store64(reg, stackPointerRegister);
1919 load64(Address(stackPointerRegister, 8), reg);
1920 }
1921
1922 void pushToSave(Address address)
1923 {
1924 load32(address, getCachedDataTempRegisterIDAndInvalidate());
1925 pushToSave(dataTempRegister);
1926 }
1927
1928 void pushToSave(TrustedImm32 imm)
1929 {
1930 move(imm, getCachedDataTempRegisterIDAndInvalidate());
1931 pushToSave(dataTempRegister);
1932 }
1933
1934 void popToRestore(FPRegisterID dest)
1935 {
1936 loadDouble(stackPointerRegister, dest);
1937 add64(TrustedImm32(16), stackPointerRegister);
1938 }
1939
1940 void pushToSave(FPRegisterID src)
1941 {
1942 sub64(TrustedImm32(16), stackPointerRegister);
1943 storeDouble(src, stackPointerRegister);
1944 }
1945
1946 static ptrdiff_t pushToSaveByteOffset() { return 16; }
1947
1948 // Register move operations:
1949
1950 void move(RegisterID src, RegisterID dest)
1951 {
1952 if (src != dest)
1953 m_assembler.mov<64>(dest, src);
1954 }
1955
1956 void move(TrustedImm32 imm, RegisterID dest)
1957 {
1958 moveInternal<TrustedImm32, int32_t>(imm, dest);
1959 }
1960
1961 void move(TrustedImmPtr imm, RegisterID dest)
1962 {
1963 moveInternal<TrustedImmPtr, intptr_t>(imm, dest);
1964 }
1965
1966 void move(TrustedImm64 imm, RegisterID dest)
1967 {
1968 moveInternal<TrustedImm64, int64_t>(imm, dest);
1969 }
1970
1971 void swap(RegisterID reg1, RegisterID reg2)
1972 {
1973 move(reg1, getCachedDataTempRegisterIDAndInvalidate());
1974 move(reg2, reg1);
1975 move(dataTempRegister, reg2);
1976 }
1977
1978 void signExtend32ToPtr(TrustedImm32 imm, RegisterID dest)
1979 {
1980 move(TrustedImmPtr(reinterpret_cast<void*>(static_cast<intptr_t>(imm.m_value))), dest);
1981 }
1982
1983 void signExtend32ToPtr(RegisterID src, RegisterID dest)
1984 {
1985 m_assembler.sxtw(dest, src);
1986 }
1987
1988 void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
1989 {
1990 m_assembler.uxtw(dest, src);
1991 }
1992
1993 void moveConditionally32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
1994 {
1995 m_assembler.cmp<32>(left, right);
1996 m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
1997 }
1998
1999 void moveConditionally64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID src, RegisterID dest)
2000 {
2001 m_assembler.cmp<64>(left, right);
2002 m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
2003 }
2004
2005 void moveConditionallyTest32(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2006 {
2007 m_assembler.tst<32>(testReg, mask);
2008 m_assembler.csel<32>(dest, src, dest, ARM64Condition(cond));
2009 }
2010
2011 void moveConditionallyTest64(ResultCondition cond, RegisterID testReg, RegisterID mask, RegisterID src, RegisterID dest)
2012 {
2013 m_assembler.tst<64>(testReg, mask);
2014 m_assembler.csel<64>(dest, src, dest, ARM64Condition(cond));
2015 }
2016
2017 // Forwards / external control flow operations:
2018 //
2019 // This set of jump and conditional branch operations returns a Jump
2020 // object which may be linked at a later point, allowing forward jumps,
2021 // or jumps that will require external linkage (after the code has been
2022 // relocated).
2023 //
2024 // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
2025 // respectively; for unsigned comparisons the names b, a, be, and ae are
2026 // used (representing 'below' and 'above').
2027 //
2028 // Operands to the comparison are provided in the expected order, e.g.
2029 // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
2030 // treated as a signed 32-bit value, is less than or equal to 5.
2031 //
2032 // jz and jnz test whether the first operand is equal to zero, and take
2033 // an optional second operand of a mask under which to perform the test.
2034
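// For example (an illustrative sketch only, assuming 'masm' is a
// MacroAssemblerARM64 and 'regT0' an allocated GPR):
//
//     Jump slowPath = masm.branch32(GreaterThan, regT0, TrustedImm32(100));
//     // ... fast path ...
//     slowPath.link(&masm); // resolve the forward jump here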
2035 Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
2036 {
2037 m_assembler.cmp<32>(left, right);
2038 return Jump(makeBranch(cond));
2039 }
2040
2041 Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
2042 {
2043 if (isUInt12(right.m_value))
2044 m_assembler.cmp<32>(left, UInt12(right.m_value));
2045 else if (isUInt12(-right.m_value))
2046 m_assembler.cmn<32>(left, UInt12(-right.m_value));
2047 else {
2048 moveToCachedReg(right, m_dataMemoryTempRegister);
2049 m_assembler.cmp<32>(left, dataTempRegister);
2050 }
2051 return Jump(makeBranch(cond));
2052 }
2053
2054 Jump branch32(RelationalCondition cond, RegisterID left, Address right)
2055 {
2056 load32(right, getCachedMemoryTempRegisterIDAndInvalidate());
2057 return branch32(cond, left, memoryTempRegister);
2058 }
2059
2060 Jump branch32(RelationalCondition cond, Address left, RegisterID right)
2061 {
2062 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2063 return branch32(cond, memoryTempRegister, right);
2064 }
2065
2066 Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
2067 {
2068 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2069 return branch32(cond, memoryTempRegister, right);
2070 }
2071
2072 Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2073 {
2074 load32(left, getCachedMemoryTempRegisterIDAndInvalidate());
2075 return branch32(cond, memoryTempRegister, right);
2076 }
2077
2078 Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
2079 {
2080 load32(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2081 return branch32(cond, dataTempRegister, right);
2082 }
2083
2084 Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
2085 {
2086 load32(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
2087 return branch32(cond, memoryTempRegister, right);
2088 }
2089
2090 Jump branch64(RelationalCondition cond, RegisterID left, RegisterID right)
2091 {
2092 if (right == ARM64Registers::sp) {
2093 if (cond == Equal && left != ARM64Registers::sp) {
2094 // CMP can only use SP for the left argument. Since we are testing for
2095 // equality, the order does not matter here.
2096 std::swap(left, right);
2097 } else {
2098 move(right, getCachedDataTempRegisterIDAndInvalidate());
2099 right = dataTempRegister;
2100 }
2101 }
2102 m_assembler.cmp<64>(left, right);
2103 return Jump(makeBranch(cond));
2104 }
2105
2106 Jump branch64(RelationalCondition cond, RegisterID left, TrustedImm64 right)
2107 {
2108 intptr_t immediate = right.m_value;
2109 if (isUInt12(immediate))
2110 m_assembler.cmp<64>(left, UInt12(static_cast<int32_t>(immediate)));
2111 else if (isUInt12(-immediate))
2112 m_assembler.cmn<64>(left, UInt12(static_cast<int32_t>(-immediate)));
2113 else {
2114 moveToCachedReg(right, m_dataMemoryTempRegister);
2115 m_assembler.cmp<64>(left, dataTempRegister);
2116 }
2117 return Jump(makeBranch(cond));
2118 }
2119
2120 Jump branch64(RelationalCondition cond, RegisterID left, Address right)
2121 {
2122 load64(right, getCachedMemoryTempRegisterIDAndInvalidate());
2123 return branch64(cond, left, memoryTempRegister);
2124 }
2125
2126 Jump branch64(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
2127 {
2128 load64(left.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2129 return branch64(cond, dataTempRegister, right);
2130 }
2131
2132 Jump branch64(RelationalCondition cond, Address left, RegisterID right)
2133 {
2134 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2135 return branch64(cond, memoryTempRegister, right);
2136 }
2137
2138 Jump branch64(RelationalCondition cond, Address left, TrustedImm64 right)
2139 {
2140 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2141 return branch64(cond, memoryTempRegister, right);
2142 }
2143
2144 Jump branchPtr(RelationalCondition cond, BaseIndex left, RegisterID right)
2145 {
2146 load64(left, getCachedMemoryTempRegisterIDAndInvalidate());
2147 return branch64(cond, memoryTempRegister, right);
2148 }
2149
2150 Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
2151 {
2152 ASSERT(!(0xffffff00 & right.m_value));
2153 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2154 return branch32(cond, memoryTempRegister, right);
2155 }
2156
2157 Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2158 {
2159 ASSERT(!(0xffffff00 & right.m_value));
2160 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2161 return branch32(cond, memoryTempRegister, right);
2162 }
2163
2164 Jump branch8(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
2165 {
2166 ASSERT(!(0xffffff00 & right.m_value));
2167 load8(left.m_ptr, getCachedMemoryTempRegisterIDAndInvalidate());
2168 return branch32(cond, memoryTempRegister, right);
2169 }
2170
2171 Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
2172 {
2173 m_assembler.tst<32>(reg, mask);
2174 return Jump(makeBranch(cond));
2175 }
2176
2177 void test32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2178 {
2179 if (mask.m_value == -1)
2180 m_assembler.tst<32>(reg, reg);
2181 else {
2182 bool testedWithImmediate = false;
2183 if ((cond == Zero) || (cond == NonZero)) {
2184 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2185
2186 if (logicalImm.isValid()) {
2187 m_assembler.tst<32>(reg, logicalImm);
2188 testedWithImmediate = true;
2189 }
2190 }
2191 if (!testedWithImmediate) {
2192 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2193 m_assembler.tst<32>(reg, dataTempRegister);
2194 }
2195 }
2196 }
2197
2198 Jump branch(ResultCondition cond)
2199 {
2200 return Jump(makeBranch(cond));
2201 }
2202
2203 Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2204 {
2205 if (mask.m_value == -1) {
2206 if ((cond == Zero) || (cond == NonZero))
2207 return Jump(makeCompareAndBranch<32>(static_cast<ZeroCondition>(cond), reg));
2208 m_assembler.tst<32>(reg, reg);
2209 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2210 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2211 else {
2212 if ((cond == Zero) || (cond == NonZero)) {
2213 LogicalImmediate logicalImm = LogicalImmediate::create32(mask.m_value);
2214
2215 if (logicalImm.isValid()) {
2216 m_assembler.tst<32>(reg, logicalImm);
2217 return Jump(makeBranch(cond));
2218 }
2219 }
2220
2221 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2222 m_assembler.tst<32>(reg, dataTempRegister);
2223 }
2224 return Jump(makeBranch(cond));
2225 }
2226
2227 Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2228 {
2229 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2230 return branchTest32(cond, memoryTempRegister, mask);
2231 }
2232
2233 Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2234 {
2235 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2236 return branchTest32(cond, memoryTempRegister, mask);
2237 }
2238
2239 Jump branchTest64(ResultCondition cond, RegisterID reg, RegisterID mask)
2240 {
2241 m_assembler.tst<64>(reg, mask);
2242 return Jump(makeBranch(cond));
2243 }
2244
2245 Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2246 {
2247 if (mask.m_value == -1) {
2248 if ((cond == Zero) || (cond == NonZero))
2249 return Jump(makeCompareAndBranch<64>(static_cast<ZeroCondition>(cond), reg));
2250 m_assembler.tst<64>(reg, reg);
2251 } else if (hasOneBitSet(mask.m_value) && ((cond == Zero) || (cond == NonZero)))
2252 return Jump(makeTestBitAndBranch(reg, getLSBSet(mask.m_value), static_cast<ZeroCondition>(cond)));
2253 else {
2254 if ((cond == Zero) || (cond == NonZero)) {
2255 LogicalImmediate logicalImm = LogicalImmediate::create64(mask.m_value);
2256
2257 if (logicalImm.isValid()) {
2258 m_assembler.tst<64>(reg, logicalImm);
2259 return Jump(makeBranch(cond));
2260 }
2261 }
2262
2263 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2264 m_assembler.tst<64>(reg, dataTempRegister);
2265 }
2266 return Jump(makeBranch(cond));
2267 }
2268
2269 Jump branchTest64(ResultCondition cond, RegisterID reg, TrustedImm64 mask)
2270 {
2271 move(mask, getCachedDataTempRegisterIDAndInvalidate());
2272 return branchTest64(cond, reg, dataTempRegister);
2273 }
2274
2275 Jump branchTest64(ResultCondition cond, Address address, RegisterID mask)
2276 {
2277 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2278 return branchTest64(cond, dataTempRegister, mask);
2279 }
2280
2281 Jump branchTest64(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2282 {
2283 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2284 return branchTest64(cond, dataTempRegister, mask);
2285 }
2286
2287 Jump branchTest64(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2288 {
2289 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2290 return branchTest64(cond, dataTempRegister, mask);
2291 }
2292
2293 Jump branchTest64(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2294 {
2295 load64(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2296 return branchTest64(cond, dataTempRegister, mask);
2297 }
2298
2299 Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
2300 {
2301 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2302 return branchTest32(cond, dataTempRegister, mask);
2303 }
2304
2305 Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
2306 {
2307 load8(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2308 return branchTest32(cond, dataTempRegister, mask);
2309 }
2310
2311 Jump branchTest8(ResultCondition cond, ExtendedAddress address, TrustedImm32 mask = TrustedImm32(-1))
2312 {
2313 move(TrustedImmPtr(reinterpret_cast<void*>(address.offset)), getCachedDataTempRegisterIDAndInvalidate());
2314 m_assembler.ldrb(dataTempRegister, address.base, dataTempRegister);
2315 return branchTest32(cond, dataTempRegister, mask);
2316 }
2317
2318 Jump branchTest8(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
2319 {
2320 load8(address, getCachedDataTempRegisterIDAndInvalidate());
2321 return branchTest32(cond, dataTempRegister, mask);
2322 }
2323
2324 Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
2325 {
2326 return branch32(cond, left, right);
2327 }
2328
2329
2330 // Arithmetic control flow operations:
2331 //
2332 // This set of conditional branch operations branch based
2333 // on the result of an arithmetic operation. The operation
2334 // is performed as normal, storing the result.
2335 //
2336 // * jz operations branch if the result is zero.
2337 // * jo operations branch if the (signed) arithmetic
2338 // operation caused an overflow to occur.
2339
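// For example (illustrative sketch): branchAdd32(Overflow, src, dest)
// performs an ADDS into dest and returns a Jump taken only if the signed
// addition overflowed:
//
//     Jump overflowed = masm.branchAdd32(Overflow, regT1, regT0);
//     // ... continue with the non-overflowing result in regT0 ...
//     overflowed.link(&masm); // out-of-line overflow handling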
2340 Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2341 {
2342 m_assembler.add<32, S>(dest, op1, op2);
2343 return Jump(makeBranch(cond));
2344 }
2345
2346 Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2347 {
2348 if (isUInt12(imm.m_value)) {
2349 m_assembler.add<32, S>(dest, op1, UInt12(imm.m_value));
2350 return Jump(makeBranch(cond));
2351 }
2352 if (isUInt12(-imm.m_value)) {
2353 m_assembler.sub<32, S>(dest, op1, UInt12(-imm.m_value));
2354 return Jump(makeBranch(cond));
2355 }
2356
2357 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2358 return branchAdd32(cond, op1, dataTempRegister, dest);
2359 }
2360
2361 Jump branchAdd32(ResultCondition cond, Address src, RegisterID dest)
2362 {
2363 load32(src, getCachedDataTempRegisterIDAndInvalidate());
2364 return branchAdd32(cond, dest, dataTempRegister, dest);
2365 }
2366
2367 Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
2368 {
2369 return branchAdd32(cond, dest, src, dest);
2370 }
2371
2372 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2373 {
2374 return branchAdd32(cond, dest, imm, dest);
2375 }
2376
2377 Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress address)
2378 {
2379 load32(address.m_ptr, getCachedDataTempRegisterIDAndInvalidate());
2380
2381 if (isUInt12(imm.m_value)) {
2382 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, UInt12(imm.m_value));
2383 store32(dataTempRegister, address.m_ptr);
2384 } else if (isUInt12(-imm.m_value)) {
2385 m_assembler.sub<32, S>(dataTempRegister, dataTempRegister, UInt12(-imm.m_value));
2386 store32(dataTempRegister, address.m_ptr);
2387 } else {
2388 move(imm, getCachedMemoryTempRegisterIDAndInvalidate());
2389 m_assembler.add<32, S>(dataTempRegister, dataTempRegister, memoryTempRegister);
2390 store32(dataTempRegister, address.m_ptr);
2391 }
2392
2393 return Jump(makeBranch(cond));
2394 }
2395
2396 Jump branchAdd64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2397 {
2398 m_assembler.add<64, S>(dest, op1, op2);
2399 return Jump(makeBranch(cond));
2400 }
2401
2402 Jump branchAdd64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2403 {
2404 if (isUInt12(imm.m_value)) {
2405 m_assembler.add<64, S>(dest, op1, UInt12(imm.m_value));
2406 return Jump(makeBranch(cond));
2407 }
2408 if (isUInt12(-imm.m_value)) {
2409 m_assembler.sub<64, S>(dest, op1, UInt12(-imm.m_value));
2410 return Jump(makeBranch(cond));
2411 }
2412
2413 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2414 return branchAdd64(cond, op1, dataTempRegister, dest);
2415 }
2416
2417 Jump branchAdd64(ResultCondition cond, RegisterID src, RegisterID dest)
2418 {
2419 return branchAdd64(cond, dest, src, dest);
2420 }
2421
2422 Jump branchAdd64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2423 {
2424 return branchAdd64(cond, dest, imm, dest);
2425 }
2426
2427 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
2428 {
2429 ASSERT(cond != Signed);
2430
2431 if (cond != Overflow) {
2432 m_assembler.mul<32>(dest, src1, src2);
2433 return branchTest32(cond, dest);
2434 }
2435
2436 // This is a signed multiply of two 32-bit values, producing a 64-bit result.
2437 m_assembler.smull(dest, src1, src2);
2438 // Copy bits 63..32 of the result to bits 31..0 of scratch1.
2439 m_assembler.asr<64>(scratch1, dest, 32);
2440 // Splat bit 31 of the result to bits 31..0 of scratch2.
2441 m_assembler.asr<32>(scratch2, dest, 31);
2442 // After a mul32 the top 32 bits of the register should be clear.
2443 zeroExtend32ToPtr(dest, dest);
2444 // Check that bits 63..31 of the original result were all equal.
2445 return branch32(NotEqual, scratch2, scratch1);
2446 }
2447
2448 Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2449 {
2450 return branchMul32(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2451 }
2452
2453 Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
2454 {
2455 return branchMul32(cond, dest, src, dest);
2456 }
2457
2458 Jump branchMul32(ResultCondition cond, RegisterID src, TrustedImm32 imm, RegisterID dest)
2459 {
2460 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2461 return branchMul32(cond, dataTempRegister, src, dest);
2462 }
2463
2464 Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
2465 {
2466 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2467 return branchMul32(cond, dataTempRegister, src, dest);
2468 }
2469
2470 Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID scratch1, RegisterID scratch2, RegisterID dest)
2471 {
2472 ASSERT(cond != Signed);
2473
2474 // This is a signed multiply of two 64-bit values, producing a 64-bit result.
2475 m_assembler.mul<64>(dest, src1, src2);
2476
2477 if (cond != Overflow)
2478 return branchTest64(cond, dest);
2479
2480 // Compute bits 127..64 of the result into scratch1.
2481 m_assembler.smulh(scratch1, src1, src2);
2482 // Splat bit 63 of the result to bits 63..0 of scratch2.
2483 m_assembler.asr<64>(scratch2, dest, 63);
2484 // Check that bits 127..63 of the full 128-bit result were all equal.
2485 return branch64(NotEqual, scratch2, scratch1);
2486 }
2487
2488 Jump branchMul64(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
2489 {
2490 return branchMul64(cond, src1, src2, getCachedDataTempRegisterIDAndInvalidate(), getCachedMemoryTempRegisterIDAndInvalidate(), dest);
2491 }
2492
2493 Jump branchMul64(ResultCondition cond, RegisterID src, RegisterID dest)
2494 {
2495 return branchMul64(cond, dest, src, dest);
2496 }
2497
2498 Jump branchNeg32(ResultCondition cond, RegisterID dest)
2499 {
2500 m_assembler.neg<32, S>(dest, dest);
2501 return Jump(makeBranch(cond));
2502 }
2503
2504 Jump branchNeg64(ResultCondition cond, RegisterID srcDest)
2505 {
2506 m_assembler.neg<64, S>(srcDest, srcDest);
2507 return Jump(makeBranch(cond));
2508 }
2509
2510 Jump branchSub32(ResultCondition cond, RegisterID dest)
2511 {
2512 m_assembler.neg<32, S>(dest, dest);
2513 return Jump(makeBranch(cond));
2514 }
2515
2516 Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2517 {
2518 m_assembler.sub<32, S>(dest, op1, op2);
2519 return Jump(makeBranch(cond));
2520 }
2521
2522 Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2523 {
2524 if (isUInt12(imm.m_value)) {
2525 m_assembler.sub<32, S>(dest, op1, UInt12(imm.m_value));
2526 return Jump(makeBranch(cond));
2527 }
2528 if (isUInt12(-imm.m_value)) {
2529 m_assembler.add<32, S>(dest, op1, UInt12(-imm.m_value));
2530 return Jump(makeBranch(cond));
2531 }
2532
2533 signExtend32ToPtr(imm, getCachedDataTempRegisterIDAndInvalidate());
2534 return branchSub32(cond, op1, dataTempRegister, dest);
2535 }
2536
2537 Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
2538 {
2539 return branchSub32(cond, dest, src, dest);
2540 }
2541
2542 Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2543 {
2544 return branchSub32(cond, dest, imm, dest);
2545 }
2546
2547 Jump branchSub64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2548 {
2549 m_assembler.sub<64, S>(dest, op1, op2);
2550 return Jump(makeBranch(cond));
2551 }
2552
2553 Jump branchSub64(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
2554 {
2555 if (isUInt12(imm.m_value)) {
2556 m_assembler.sub<64, S>(dest, op1, UInt12(imm.m_value));
2557 return Jump(makeBranch(cond));
2558 }
2559 if (isUInt12(-imm.m_value)) {
2560 m_assembler.add<64, S>(dest, op1, UInt12(-imm.m_value));
2561 return Jump(makeBranch(cond));
2562 }
2563
2564 move(imm, getCachedDataTempRegisterIDAndInvalidate());
2565 return branchSub64(cond, op1, dataTempRegister, dest);
2566 }
2567
2568 Jump branchSub64(ResultCondition cond, RegisterID src, RegisterID dest)
2569 {
2570 return branchSub64(cond, dest, src, dest);
2571 }
2572
2573 Jump branchSub64(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
2574 {
2575 return branchSub64(cond, dest, imm, dest);
2576 }
2577
2578
2579 // Jumps, calls, returns
2580
2581 ALWAYS_INLINE Call call()
2582 {
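// Materialize the target with a fixed-width three-instruction move so that
// linkCall()/repatchCall() can later rewrite the pointer at a known offset
// (REPATCH_OFFSET_CALL_TO_POINTER) before the blr.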
2583 AssemblerLabel pointerLabel = m_assembler.label();
2584 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2585 invalidateAllTempRegisters();
2586 m_assembler.blr(dataTempRegister);
2587 AssemblerLabel callLabel = m_assembler.label();
2588 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2589 return Call(callLabel, Call::Linkable);
2590 }
2591
2592 ALWAYS_INLINE Call call(RegisterID target)
2593 {
2594 invalidateAllTempRegisters();
2595 m_assembler.blr(target);
2596 return Call(m_assembler.label(), Call::None);
2597 }
2598
2599 ALWAYS_INLINE Call call(Address address)
2600 {
2601 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2602 return call(dataTempRegister);
2603 }
2604
2605 ALWAYS_INLINE Jump jump()
2606 {
2607 AssemblerLabel label = m_assembler.label();
2608 m_assembler.b();
2609 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpNoConditionFixedSize : ARM64Assembler::JumpNoCondition);
2610 }
2611
2612 void jump(RegisterID target)
2613 {
2614 m_assembler.br(target);
2615 }
2616
2617 void jump(Address address)
2618 {
2619 load64(address, getCachedDataTempRegisterIDAndInvalidate());
2620 m_assembler.br(dataTempRegister);
2621 }
2622
2623 void jump(AbsoluteAddress address)
2624 {
2625 move(TrustedImmPtr(address.m_ptr), getCachedDataTempRegisterIDAndInvalidate());
2626 load64(Address(dataTempRegister), dataTempRegister);
2627 m_assembler.br(dataTempRegister);
2628 }
2629
2630 ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
2631 {
2632 oldJump.link(this);
2633 return tailRecursiveCall();
2634 }
2635
2636 ALWAYS_INLINE Call nearCall()
2637 {
2638 m_assembler.bl();
2639 return Call(m_assembler.label(), Call::LinkableNear);
2640 }
2641
2642#if 0
2643 ALWAYS_INLINE Call nearTailCall()
2644 {
2645 AssemblerLabel label = m_assembler.label();
2646 m_assembler.b();
2647 return Call(label, Call::LinkableNearTail);
2648 }
2649#endif
2650
2651 ALWAYS_INLINE void ret()
2652 {
2653 m_assembler.ret();
2654 }
2655
2656 ALWAYS_INLINE Call tailRecursiveCall()
2657 {
2658 // Like a normal call, but don't link.
2659 AssemblerLabel pointerLabel = m_assembler.label();
2660 moveWithFixedWidth(TrustedImmPtr(0), getCachedDataTempRegisterIDAndInvalidate());
2661 m_assembler.br(dataTempRegister);
2662 AssemblerLabel callLabel = m_assembler.label();
2663 ASSERT_UNUSED(pointerLabel, ARM64Assembler::getDifferenceBetweenLabels(callLabel, pointerLabel) == REPATCH_OFFSET_CALL_TO_POINTER);
2664 return Call(callLabel, Call::Linkable);
2665 }
2666
2667
2668 // Comparison operations
2669
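// These set dest to 1 when the condition holds and 0 otherwise (via CSET);
// e.g. compare32(LessThan, lhs, rhs, dest) leaves dest == (lhs < rhs ? 1 : 0).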
2670 void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2671 {
2672 m_assembler.cmp<32>(left, right);
2673 m_assembler.cset<32>(dest, ARM64Condition(cond));
2674 }
2675
2676 void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
2677 {
2678 load32(left, getCachedDataTempRegisterIDAndInvalidate());
2679 m_assembler.cmp<32>(dataTempRegister, right);
2680 m_assembler.cset<32>(dest, ARM64Condition(cond));
2681 }
2682
2683 void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2684 {
2685 move(right, getCachedDataTempRegisterIDAndInvalidate());
2686 m_assembler.cmp<32>(left, dataTempRegister);
2687 m_assembler.cset<32>(dest, ARM64Condition(cond));
2688 }
2689
2690 void compare64(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
2691 {
2692 m_assembler.cmp<64>(left, right);
2693 m_assembler.cset<32>(dest, ARM64Condition(cond));
2694 }
2695
2696 void compare64(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
2697 {
2698 signExtend32ToPtr(right, getCachedDataTempRegisterIDAndInvalidate());
2699 m_assembler.cmp<64>(left, dataTempRegister);
2700 m_assembler.cset<32>(dest, ARM64Condition(cond));
2701 }
2702
2703 void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
2704 {
2705 load8(left, getCachedMemoryTempRegisterIDAndInvalidate());
2706 move(right, getCachedDataTempRegisterIDAndInvalidate());
2707 compare32(cond, memoryTempRegister, dataTempRegister, dest);
2708 }
2709
2710 void test32(ResultCondition cond, RegisterID src, RegisterID mask, RegisterID dest)
2711 {
2712 m_assembler.tst<32>(src, mask);
2713 m_assembler.cset<32>(dest, ARM64Condition(cond));
2714 }
2715
2716 void test32(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2717 {
2718 if (mask.m_value == -1)
2719 m_assembler.tst<32>(src, src);
2720 else {
2721 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2722 m_assembler.tst<32>(src, dataTempRegister);
2723 }
2724 m_assembler.cset<32>(dest, ARM64Condition(cond));
2725 }
2726
2727 void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2728 {
2729 load32(address, getCachedMemoryTempRegisterIDAndInvalidate());
2730 test32(cond, memoryTempRegister, mask, dest);
2731 }
2732
2733 void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
2734 {
2735 load8(address, getCachedMemoryTempRegisterIDAndInvalidate());
2736 test32(cond, memoryTempRegister, mask, dest);
2737 }
2738
2739 void test64(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
2740 {
2741 m_assembler.tst<64>(op1, op2);
2742 m_assembler.cset<32>(dest, ARM64Condition(cond));
2743 }
2744
2745 void test64(ResultCondition cond, RegisterID src, TrustedImm32 mask, RegisterID dest)
2746 {
2747 if (mask.m_value == -1)
2748 m_assembler.tst<64>(src, src);
2749 else {
2750 signExtend32ToPtr(mask, getCachedDataTempRegisterIDAndInvalidate());
2751 m_assembler.tst<64>(src, dataTempRegister);
2752 }
2753 m_assembler.cset<32>(dest, ARM64Condition(cond));
2754 }
2755
2756 void setCarry(RegisterID dest)
2757 {
2758 m_assembler.cset<32>(dest, ARM64Assembler::ConditionCS);
2759 }
2760
2761 // Patchable operations
2762
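// These emit fixed-width instruction sequences whose embedded constants can
// be rewritten after the code has been linked. A typical pattern (an
// illustrative sketch; the exact patching entry point depends on the
// surrounding repatch infrastructure):
//
//     DataLabelPtr ptrLabel;
//     Jump miss = masm.branchPtrWithPatch(NotEqual, regT0, ptrLabel);
//     // ... later, once the real pointer is known, rewrite the constant
//     // that branchPtrWithPatch materialized at ptrLabel.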
2763 ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dest)
2764 {
2765 DataLabel32 label(this);
2766 moveWithFixedWidth(imm, dest);
2767 return label;
2768 }
2769
2770 ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dest)
2771 {
2772 DataLabelPtr label(this);
2773 moveWithFixedWidth(imm, dest);
2774 return label;
2775 }
2776
2777 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2778 {
2779 dataLabel = DataLabelPtr(this);
2780 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2781 return branch64(cond, left, dataTempRegister);
2782 }
2783
2784 ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2785 {
2786 dataLabel = DataLabelPtr(this);
2787 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2788 return branch64(cond, left, dataTempRegister);
2789 }
2790
2791 ALWAYS_INLINE Jump branch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
2792 {
2793 dataLabel = DataLabel32(this);
2794 moveWithPatch(initialRightValue, getCachedDataTempRegisterIDAndInvalidate());
2795 return branch32(cond, left, dataTempRegister);
2796 }
2797
2798 PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right)
2799 {
2800 m_makeJumpPatchable = true;
2801 Jump result = branch64(cond, left, TrustedImm64(right));
2802 m_makeJumpPatchable = false;
2803 return PatchableJump(result);
2804 }
2805
2806 PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
2807 {
2808 m_makeJumpPatchable = true;
2809 Jump result = branchTest32(cond, reg, mask);
2810 m_makeJumpPatchable = false;
2811 return PatchableJump(result);
2812 }
2813
2814 PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm)
2815 {
2816 m_makeJumpPatchable = true;
2817 Jump result = branch32(cond, reg, imm);
2818 m_makeJumpPatchable = false;
2819 return PatchableJump(result);
2820 }
2821
2822 PatchableJump patchableBranch64(RelationalCondition cond, RegisterID reg, TrustedImm64 imm)
2823 {
2824 m_makeJumpPatchable = true;
2825 Jump result = branch64(cond, reg, imm);
2826 m_makeJumpPatchable = false;
2827 return PatchableJump(result);
2828 }
2829
2830 PatchableJump patchableBranch64(RelationalCondition cond, RegisterID left, RegisterID right)
2831 {
2832 m_makeJumpPatchable = true;
2833 Jump result = branch64(cond, left, right);
2834 m_makeJumpPatchable = false;
2835 return PatchableJump(result);
2836 }
2837
2838 PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
2839 {
2840 m_makeJumpPatchable = true;
2841 Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue);
2842 m_makeJumpPatchable = false;
2843 return PatchableJump(result);
2844 }
2845
2846 PatchableJump patchableBranch32WithPatch(RelationalCondition cond, Address left, DataLabel32& dataLabel, TrustedImm32 initialRightValue = TrustedImm32(0))
2847 {
2848 m_makeJumpPatchable = true;
2849 Jump result = branch32WithPatch(cond, left, dataLabel, initialRightValue);
2850 m_makeJumpPatchable = false;
2851 return PatchableJump(result);
2852 }
2853
2854 PatchableJump patchableJump()
2855 {
2856 m_makeJumpPatchable = true;
2857 Jump result = jump();
2858 m_makeJumpPatchable = false;
2859 return PatchableJump(result);
2860 }
2861
2862 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address)
2863 {
2864 DataLabelPtr label(this);
2865 moveWithFixedWidth(initialValue, getCachedDataTempRegisterIDAndInvalidate());
2866 store64(dataTempRegister, address);
2867 return label;
2868 }
2869
2870 ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address)
2871 {
2872 return storePtrWithPatch(TrustedImmPtr(0), address);
2873 }
2874
2875 static void reemitInitialMoveWithPatch(void* address, void* value)
2876 {
2877 ARM64Assembler::setPointer(static_cast<int*>(address), value, dataTempRegister, true);
2878 }
2879
2880 // Miscellaneous operations:
2881
2882 void breakpoint(uint16_t imm = 0)
2883 {
2884 m_assembler.brk(imm);
2885 }
2886
2887 void nop()
2888 {
2889 m_assembler.nop();
2890 }
2891
2892 void memoryFence()
2893 {
2894 m_assembler.dmbSY();
2895 }
2896
2897
2898 // Misc helper functions.
2899
2900 // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
2901 static RelationalCondition invert(RelationalCondition cond)
2902 {
2903 return static_cast<RelationalCondition>(ARM64Assembler::invert(static_cast<ARM64Assembler::Condition>(cond)));
2904 }
2905
2906 static FunctionPtr readCallTarget(CodeLocationCall call)
2907 {
2908 return FunctionPtr(reinterpret_cast<void(*)()>(ARM64Assembler::readCallTarget(call.dataLocation())));
2909 }
2910
2911 static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
2912 {
2913 ARM64Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
2914 }
2915
2916 static ptrdiff_t maxJumpReplacementSize()
2917 {
2918 return ARM64Assembler::maxJumpReplacementSize();
2919 }
2920
2921 RegisterID scratchRegisterForBlinding()
2922 {
2923 // We *do not* have a scratch register for blinding.
2924 RELEASE_ASSERT_NOT_REACHED();
2925 return getCachedDataTempRegisterIDAndInvalidate();
2926 }
2927
2928 static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; }
2929 static bool canJumpReplacePatchableBranch32WithPatch() { return false; }
2930
2931 static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
2932 {
2933 return label.labelAtOffset(0);
2934 }
2935
2936 static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
2937 {
2938 UNREACHABLE_FOR_PLATFORM();
2939 return CodeLocationLabel();
2940 }
2941
2942 static CodeLocationLabel startOfPatchableBranch32WithPatchOnAddress(CodeLocationDataLabel32)
2943 {
2944 UNREACHABLE_FOR_PLATFORM();
2945 return CodeLocationLabel();
2946 }
2947
2948 static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID, void* initialValue)
2949 {
2950 reemitInitialMoveWithPatch(instructionStart.dataLocation(), initialValue);
2951 }
2952
2953 static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
2954 {
2955 UNREACHABLE_FOR_PLATFORM();
2956 }
2957
2958 static void revertJumpReplacementToPatchableBranch32WithPatch(CodeLocationLabel, Address, int32_t)
2959 {
2960 UNREACHABLE_FOR_PLATFORM();
2961 }
2962
2963 static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
2964 {
2965 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2966 }
2967
2968 static void repatchCall(CodeLocationCall call, FunctionPtr destination)
2969 {
2970 ARM64Assembler::repatchPointer(call.dataLabelPtrAtOffset(REPATCH_OFFSET_CALL_TO_POINTER).dataLocation(), destination.executableAddress());
2971 }
2972
2973#if ENABLE(MASM_PROBE)
2974 void probe(ProbeFunction, void* arg1, void* arg2);
2975#endif // ENABLE(MASM_PROBE)
2976
2977protected:
2978 ALWAYS_INLINE Jump makeBranch(ARM64Assembler::Condition cond)
2979 {
2980 m_assembler.b_cond(cond);
2981 AssemblerLabel label = m_assembler.label();
2982 m_assembler.nop();
2983 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpConditionFixedSize : ARM64Assembler::JumpCondition, cond);
2984 }
2985 ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(ARM64Condition(cond)); }
2986 ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(ARM64Condition(cond)); }
2987 ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(ARM64Condition(cond)); }
2988
2989 template <int dataSize>
2990 ALWAYS_INLINE Jump makeCompareAndBranch(ZeroCondition cond, RegisterID reg)
2991 {
2992 if (cond == IsZero)
2993 m_assembler.cbz<dataSize>(reg);
2994 else
2995 m_assembler.cbnz<dataSize>(reg);
2996 AssemblerLabel label = m_assembler.label();
2997 m_assembler.nop();
2998 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpCompareAndBranchFixedSize : ARM64Assembler::JumpCompareAndBranch, static_cast<ARM64Assembler::Condition>(cond), dataSize == 64, reg);
2999 }
3000
3001 ALWAYS_INLINE Jump makeTestBitAndBranch(RegisterID reg, unsigned bit, ZeroCondition cond)
3002 {
3003 ASSERT(bit < 64);
3004 bit &= 0x3f;
3005 if (cond == IsZero)
3006 m_assembler.tbz(reg, bit);
3007 else
3008 m_assembler.tbnz(reg, bit);
3009 AssemblerLabel label = m_assembler.label();
3010 m_assembler.nop();
3011 return Jump(label, m_makeJumpPatchable ? ARM64Assembler::JumpTestBitFixedSize : ARM64Assembler::JumpTestBit, static_cast<ARM64Assembler::Condition>(cond), bit, reg);
3012 }
3013
3014 ARM64Assembler::Condition ARM64Condition(RelationalCondition cond)
3015 {
3016 return static_cast<ARM64Assembler::Condition>(cond);
3017 }
3018
3019 ARM64Assembler::Condition ARM64Condition(ResultCondition cond)
3020 {
3021 return static_cast<ARM64Assembler::Condition>(cond);
3022 }
3023
3024 ARM64Assembler::Condition ARM64Condition(DoubleCondition cond)
3025 {
3026 return static_cast<ARM64Assembler::Condition>(cond);
3027 }
3028
3029private:
3030 ALWAYS_INLINE RegisterID getCachedDataTempRegisterIDAndInvalidate()
3031 {
3032 RELEASE_ASSERT(m_allowScratchRegister);
3033 return m_dataMemoryTempRegister.registerIDInvalidate();
3034 }
3035 ALWAYS_INLINE RegisterID getCachedMemoryTempRegisterIDAndInvalidate()
3036 {
3037 RELEASE_ASSERT(m_allowScratchRegister);
3038 return m_cachedMemoryTempRegister.registerIDInvalidate();
3039 }
3040
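// Returns true if value is representable as a sign-extended 32-bit integer.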
3041 ALWAYS_INLINE bool isInIntRange(int64_t value)
3042 {
3043 return value == ((value << 32) >> 32);
3044 }
3045
3046 template<typename ImmediateType, typename rawType>
3047 void moveInternal(ImmediateType imm, RegisterID dest)
3048 {
3049 const int dataSize = sizeof(rawType) * 8;
3050 const int numberHalfWords = dataSize / 16;
3051 rawType value = bitwise_cast<rawType>(imm.m_value);
3052 uint16_t halfword[numberHalfWords];
3053
3054 // Handle 0 and ~0 here to simplify code below
3055 if (!value) {
3056 m_assembler.movz<dataSize>(dest, 0);
3057 return;
3058 }
3059 if (!~value) {
3060 m_assembler.movn<dataSize>(dest, 0);
3061 return;
3062 }
3063
3064 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(value)) : LogicalImmediate::create32(static_cast<uint32_t>(value));
3065
3066 if (logicalImm.isValid()) {
3067 m_assembler.movi<dataSize>(dest, logicalImm);
3068 return;
3069 }
3070
3071 // Figure out how many halfwords are 0 or FFFF, then choose movz or movn accordingly.
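// For example, 0x0000ffffffff1234 has one zero halfword and two 0xffff
// halfwords, so the negated form wins: movn + one movk (two instructions)
// instead of movz + two movk (three instructions).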
3072 int zeroOrNegateVote = 0;
3073 for (int i = 0; i < numberHalfWords; ++i) {
3074 halfword[i] = getHalfword(value, i);
3075 if (!halfword[i])
3076 zeroOrNegateVote++;
3077 else if (halfword[i] == 0xffff)
3078 zeroOrNegateVote--;
3079 }
3080
3081 bool needToClearRegister = true;
3082 if (zeroOrNegateVote >= 0) {
3083 for (int i = 0; i < numberHalfWords; i++) {
3084 if (halfword[i]) {
3085 if (needToClearRegister) {
3086 m_assembler.movz<dataSize>(dest, halfword[i], 16*i);
3087 needToClearRegister = false;
3088 } else
3089 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
3090 }
3091 }
3092 } else {
3093 for (int i = 0; i < numberHalfWords; i++) {
3094 if (halfword[i] != 0xffff) {
3095 if (needToClearRegister) {
3096 m_assembler.movn<dataSize>(dest, ~halfword[i], 16*i);
3097 needToClearRegister = false;
3098 } else
3099 m_assembler.movk<dataSize>(dest, halfword[i], 16*i);
3100 }
3101 }
3102 }
3103 }
3104
3105 template<int datasize>
3106 void loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm);
3107
3108 template<int datasize>
3109 void loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm);
3110
3111 template<int datasize>
3112 void loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm);
3113
3114 template<int datasize>
3115 void loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm);
3116
3117 template<int datasize>
3118 void storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm);
3119
3120 template<int datasize>
3121 void storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm);
3122
3123 void moveWithFixedWidth(TrustedImm32 imm, RegisterID dest)
3124 {
3125 int32_t value = imm.m_value;
3126 m_assembler.movz<32>(dest, getHalfword(value, 0));
3127 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
3128 }
3129
3130 void moveWithFixedWidth(TrustedImmPtr imm, RegisterID dest)
3131 {
3132 intptr_t value = reinterpret_cast<intptr_t>(imm.m_value);
3133 m_assembler.movz<64>(dest, getHalfword(value, 0));
3134 m_assembler.movk<64>(dest, getHalfword(value, 1), 16);
3135 m_assembler.movk<64>(dest, getHalfword(value, 2), 32);
3136 }
3137
3138 void signExtend32ToPtrWithFixedWidth(int32_t value, RegisterID dest)
3139 {
3140 if (value >= 0) {
3141 m_assembler.movz<32>(dest, getHalfword(value, 0));
3142 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
3143 } else {
3144 m_assembler.movn<32>(dest, ~getHalfword(value, 0));
3145 m_assembler.movk<32>(dest, getHalfword(value, 1), 16);
3146 }
3147 }
3148
3149 template<int datasize>
3150 ALWAYS_INLINE void load(const void* address, RegisterID dest)
3151 {
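// If memoryTempRegister is known to still hold a pointer near 'address',
// address relative to it (ldur/ldr with an immediate offset, or a single
// movk patching the low 16 bits) instead of re-materializing the full
// 64-bit address.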
3152 intptr_t currentRegisterContents;
3153 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
3154 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
3155 intptr_t addressDelta = addressAsInt - currentRegisterContents;
3156
3157 if (dest == memoryTempRegister)
3158 m_cachedMemoryTempRegister.invalidate();
3159
3160 if (isInIntRange(addressDelta)) {
3161 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
3162 m_assembler.ldur<datasize>(dest, memoryTempRegister, addressDelta);
3163 return;
3164 }
3165
3166 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
3167 m_assembler.ldr<datasize>(dest, memoryTempRegister, addressDelta);
3168 return;
3169 }
3170 }
3171
3172 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
3173 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
3174 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
3175 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
3176 return;
3177 }
3178 }
3179
3180 move(TrustedImmPtr(address), memoryTempRegister);
3181 if (dest == memoryTempRegister)
3182 m_cachedMemoryTempRegister.invalidate();
3183 else
3184 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
3185 m_assembler.ldr<datasize>(dest, memoryTempRegister, ARM64Registers::zr);
3186 }
3187
3188 template<int datasize>
3189 ALWAYS_INLINE void store(RegisterID src, const void* address)
3190 {
3191 ASSERT(src != memoryTempRegister);
3192 intptr_t currentRegisterContents;
3193 if (m_cachedMemoryTempRegister.value(currentRegisterContents)) {
3194 intptr_t addressAsInt = reinterpret_cast<intptr_t>(address);
3195 intptr_t addressDelta = addressAsInt - currentRegisterContents;
3196
3197 if (isInIntRange(addressDelta)) {
3198 if (ARM64Assembler::canEncodeSImmOffset(addressDelta)) {
3199 m_assembler.stur<datasize>(src, memoryTempRegister, addressDelta);
3200 return;
3201 }
3202
3203 if (ARM64Assembler::canEncodePImmOffset<datasize>(addressDelta)) {
3204 m_assembler.str<datasize>(src, memoryTempRegister, addressDelta);
3205 return;
3206 }
3207 }
3208
3209 if ((addressAsInt & (~maskHalfWord0)) == (currentRegisterContents & (~maskHalfWord0))) {
3210 m_assembler.movk<64>(memoryTempRegister, addressAsInt & maskHalfWord0, 0);
3211 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
3212 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
3213 return;
3214 }
3215 }
3216
3217 move(TrustedImmPtr(address), memoryTempRegister);
3218 m_cachedMemoryTempRegister.setValue(reinterpret_cast<intptr_t>(address));
3219 m_assembler.str<datasize>(src, memoryTempRegister, ARM64Registers::zr);
3220 }
3221
3222 template <int dataSize>
3223 ALWAYS_INLINE bool tryMoveUsingCacheRegisterContents(intptr_t immediate, CachedTempRegister& dest)
3224 {
3225#if 1
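// The cached-register fast path below is compiled out here, so this
// always returns false and callers re-materialize the immediate.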
3226 Q_UNUSED(immediate);
3227 Q_UNUSED(dest);
3228#else
3229 intptr_t currentRegisterContents;
3230 if (dest.value(currentRegisterContents)) {
3231 if (currentRegisterContents == immediate)
3232 return true;
3233
3234 LogicalImmediate logicalImm = dataSize == 64 ? LogicalImmediate::create64(static_cast<uint64_t>(immediate)) : LogicalImmediate::create32(static_cast<uint32_t>(immediate));
3235
3236 if (logicalImm.isValid()) {
3237 m_assembler.movi<dataSize>(dest.registerIDNoInvalidate(), logicalImm);
3238 dest.setValue(immediate);
3239 return true;
3240 }
3241
3242 if ((immediate & maskUpperWord) == (currentRegisterContents & maskUpperWord)) {
3243 if ((immediate & maskHalfWord1) != (currentRegisterContents & maskHalfWord1))
3244 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), (immediate & maskHalfWord1) >> 16, 16);
3245
3246 if ((immediate & maskHalfWord0) != (currentRegisterContents & maskHalfWord0))
3247 m_assembler.movk<dataSize>(dest.registerIDNoInvalidate(), immediate & maskHalfWord0, 0);
3248
3249 dest.setValue(immediate);
3250 return true;
3251 }
3252 }
3253#endif
3254
3255 return false;
3256 }
3257
3258 void moveToCachedReg(TrustedImm32 imm, CachedTempRegister& dest)
3259 {
3260 if (tryMoveUsingCacheRegisterContents<32>(static_cast<intptr_t>(imm.m_value), dest))
3261 return;
3262
3263 moveInternal<TrustedImm32, int32_t>(imm, dest.registerIDNoInvalidate());
3264 dest.setValue(imm.m_value);
3265 }
3266
3267 void moveToCachedReg(TrustedImmPtr imm, CachedTempRegister& dest)
3268 {
3269 if (tryMoveUsingCacheRegisterContents<64>(imm.asIntptr(), dest))
3270 return;
3271
3272 moveInternal<TrustedImmPtr, intptr_t>(imm, dest.registerIDNoInvalidate());
3273 dest.setValue(imm.asIntptr());
3274 }
3275
3276 void moveToCachedReg(TrustedImm64 imm, CachedTempRegister& dest)
3277 {
3278 if (tryMoveUsingCacheRegisterContents<64>(static_cast<intptr_t>(imm.m_value), dest))
3279 return;
3280
3281 moveInternal<TrustedImm64, int64_t>(imm, dest.registerIDNoInvalidate());
3282 dest.setValue(imm.m_value);
3283 }
3284
3285 template<int datasize>
3286 bool tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset);
3287
3288 template<int datasize>
3289 bool tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset);
3290
3291 template<int datasize>
3292 bool tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset);
3293
3294 template<int datasize>
3295 bool tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset);
3296
3297 template<int datasize>
3298 bool tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset);
3299
3300 Jump jumpAfterFloatingPointCompare(DoubleCondition cond)
3301 {
3302 if (cond == DoubleNotEqual) {
3303 // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
3304 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
3305 Jump result = makeBranch(ARM64Assembler::ConditionNE);
3306 unordered.link(this);
3307 return result;
3308 }
3309 if (cond == DoubleEqualOrUnordered) {
3310 Jump unordered = makeBranch(ARM64Assembler::ConditionVS);
3311 Jump notEqual = makeBranch(ARM64Assembler::ConditionNE);
3312 unordered.link(this);
3313 // We get here if either unordered or equal.
3314 Jump result = jump();
3315 notEqual.link(this);
3316 return result;
3317 }
3318 return makeBranch(cond);
3319 }
3320
3321 template <typename, template <typename> class> friend class LinkBufferBase;
3322 template <typename> friend class BranchCompactingLinkBuffer;
3323 template <typename> friend struct BranchCompactingExecutableOffsetCalculator;
3324 void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) { m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
3325 int executableOffsetFor(int location) { return m_assembler.executableOffsetFor(location); }
3326
3327 static void linkCall(void* code, Call call, FunctionPtr function)
3328 {
3329 if (!call.isFlagSet(Call::Near))
3330 ARM64Assembler::linkPointer(code, call.m_label.labelAtOffset(REPATCH_OFFSET_CALL_TO_POINTER), function.value());
3331#if 0
3332 else if (call.isFlagSet(Call::Tail))
3333 ARM64Assembler::linkJump(code, call.m_label, function.value());
3334#endif
3335 else
3336 ARM64Assembler::linkCall(code, call.m_label, function.value());
3337 }
3338
3339 CachedTempRegister m_dataMemoryTempRegister;
3340 CachedTempRegister m_cachedMemoryTempRegister;
3341 bool m_makeJumpPatchable;
3342 bool m_allowScratchRegister = true;
3343};
3344
3345template<int datasize>
3346ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
3347{
3348 m_assembler.ldr<datasize>(rt, rn, pimm);
3349}
3350
3351template<int datasize>
3352ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
3353{
3354 m_assembler.ldur<datasize>(rt, rn, simm);
3355}
3356
3357template<int datasize>
3358ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
3359{
3360 loadUnsignedImmediate<datasize>(rt, rn, pimm);
3361}
3362
3363template<int datasize>
3364ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
3365{
3366 loadUnscaledImmediate<datasize>(rt, rn, simm);
3367}
3368
3369template<int datasize>
3370ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate(RegisterID rt, RegisterID rn, unsigned pimm)
3371{
3372 m_assembler.str<datasize>(rt, rn, pimm);
3373}
3374
3375template<int datasize>
3376ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate(RegisterID rt, RegisterID rn, int simm)
3377{
3378 m_assembler.stur<datasize>(rt, rn, simm);
3379}
3380
3381
3382// Extend the {load,store}{Unsigned,Unscaled}Immediate templated general register methods to cover all load/store sizes
template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrsb<64>(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.ldrsh<64>(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldurh(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldursb<64>(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::loadSignedAddressedByUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.ldursh<64>(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<8>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strb(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnsignedImmediate<16>(RegisterID rt, RegisterID rn, unsigned pimm)
{
    m_assembler.strh(rt, rn, pimm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<8>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturb(rt, rn, simm);
}

template<>
ALWAYS_INLINE void MacroAssemblerARM64::storeUnscaledImmediate<16>(RegisterID rt, RegisterID rn, int simm)
{
    m_assembler.sturh(rt, rn, simm);
}
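
// Note on extension behaviour: the plain 8/16-bit loads above (ldrb/ldrh,
// ldurb/ldurh) zero-extend into the destination register, whereas the
// loadSignedAddressedBy* specializations use the sign-extending forms
// (ldrsb/ldrsh, ldursb/ldursh) instantiated with a 64-bit destination, so
// the loaded byte or halfword is sign-extended through all 64 bits.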

template<int datasize>
ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadSignedWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
{
    if (ARM64Assembler::canEncodeSImmOffset(offset)) {
        loadSignedAddressedByUnscaledImmediate<datasize>(rt, rn, offset);
        return true;
    }
    if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
        loadSignedAddressedByUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
        return true;
    }
    return false;
}
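
// The try*WithOffset helpers are the fast path: callers probe them first and
// fall back to an explicit address computation only when neither immediate
// form fits. A hedged, illustrative sketch of the typical caller (the helper
// names come from earlier in this header; the exact datasize and fallback
// instruction vary per caller):
//
//     if (tryLoadSignedWithOffset<16>(dest, address.base, address.offset))
//         return;
//     // Slow path: put the offset in the cached memory temp register and
//     // use a register-offset load instead.
//     signExtend32ToPtr(TrustedImm32(address.offset), getCachedMemoryTempRegisterIDAndInvalidate());
//     m_assembler.ldrsh<64>(dest, address.base, memoryTempRegister);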

template<int datasize>
ALWAYS_INLINE bool MacroAssemblerARM64::tryStoreWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
{
    if (ARM64Assembler::canEncodeSImmOffset(offset)) {
        storeUnscaledImmediate<datasize>(rt, rn, offset);
        return true;
    }
    if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
        storeUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
        return true;
    }
    return false;
}

template<int datasize>
ALWAYS_INLINE bool MacroAssemblerARM64::tryStoreWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
{
    if (ARM64Assembler::canEncodeSImmOffset(offset)) {
        m_assembler.stur<datasize>(rt, rn, offset);
        return true;
    }
    if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
        m_assembler.str<datasize>(rt, rn, static_cast<unsigned>(offset));
        return true;
    }
    return false;
}

template<int datasize>
ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadWithOffset(RegisterID rt, RegisterID rn, int32_t offset)
{
    if (ARM64Assembler::canEncodeSImmOffset(offset)) {
        loadUnscaledImmediate<datasize>(rt, rn, offset);
        return true;
    }
    if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
        loadUnsignedImmediate<datasize>(rt, rn, static_cast<unsigned>(offset));
        return true;
    }
    return false;
}

template<int datasize>
ALWAYS_INLINE bool MacroAssemblerARM64::tryLoadWithOffset(FPRegisterID rt, RegisterID rn, int32_t offset)
{
    if (ARM64Assembler::canEncodeSImmOffset(offset)) {
        m_assembler.ldur<datasize>(rt, rn, offset);
        return true;
    }
    if (ARM64Assembler::canEncodePImmOffset<datasize>(offset)) {
        m_assembler.ldr<datasize>(rt, rn, static_cast<unsigned>(offset));
        return true;
    }
    return false;
}
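
// The FP overloads above mirror the general-register helpers: LDUR/STUR for
// small signed byte offsets, scaled LDR/STR otherwise. In this header they
// are used primarily through the 32-bit (float) and 64-bit (double)
// instantiations.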

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM64)

#endif // MacroAssemblerARM64_h