1 | /* |
2 | * Copyright (C) 2009, 2010 Apple Inc. All rights reserved. |
3 | * Copyright (C) 2010 University of Szeged |
4 | * |
5 | * Redistribution and use in source and binary forms, with or without |
6 | * modification, are permitted provided that the following conditions |
7 | * are met: |
8 | * 1. Redistributions of source code must retain the above copyright |
9 | * notice, this list of conditions and the following disclaimer. |
10 | * 2. Redistributions in binary form must reproduce the above copyright |
11 | * notice, this list of conditions and the following disclaimer in the |
12 | * documentation and/or other materials provided with the distribution. |
13 | * |
14 | * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY |
15 | * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR |
17 | * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR |
18 | * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, |
19 | * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, |
20 | * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR |
21 | * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY |
22 | * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
23 | * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
24 | * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
25 | */ |
26 | |
27 | #ifndef MacroAssemblerARMv7_h |
28 | #define MacroAssemblerARMv7_h |
29 | |
30 | #if ENABLE(ASSEMBLER) && CPU(ARM_THUMB2) |
31 | |
32 | #include "ARMv7Assembler.h" |
33 | #include "AbstractMacroAssembler.h" |
34 | |
35 | namespace JSC { |
36 | |
37 | class MacroAssemblerARMv7 : public AbstractMacroAssembler<ARMv7Assembler> { |
protected: // The YarrJIT needs to know about addressTempRegister in order to push it.
    // FIXME: switch dataTempRegister & addressTempRegister, or possibly use r7?
    //   - dTR is likely used more than aTR, and we'll get better instruction
    //     encoding if it's in the low 8 registers.
    // Scratch registers used internally whenever an operand (immediate or
    // address) cannot be encoded directly in a single instruction.
    static const RegisterID dataTempRegister = ARMRegisters::ip;
    static const RegisterID addressTempRegister = ARMRegisters::r6;

    // Scratch double register; its single-precision alias is obtained via
    // fpTempRegisterAsSingle().
    static const ARMRegisters::FPDoubleRegisterID fpTempRegister = ARMRegisters::d7;
    inline ARMRegisters::FPSingleRegisterID fpTempRegisterAsSingle() { return ARMRegisters::asSingle(fpTempRegister); }
47 | |
public:
    // Size of a pointer on this target, in bytes.
    static const int PointerSize = 4;

    MacroAssemblerARMv7()
        : m_makeJumpPatchable(false)
    {
    }

    // Types used by the branch-compaction machinery (see the forwarding
    // methods below).
    typedef ARMv7Assembler::LinkRecord LinkRecord;
    typedef ARMv7Assembler::JumpType JumpType;
    typedef ARMv7Assembler::JumpLinkType JumpLinkType;
59 | |
60 | static bool isCompactPtrAlignedAddressOffset(ptrdiff_t value) |
61 | { |
62 | return value >= -255 && value <= 255; |
63 | } |
64 | |
    // Branch-compaction support: these forward directly to the underlying
    // assembler, which records emitted jumps and can later shrink them to
    // shorter encodings where the displacement allows.
    Vector<LinkRecord, 0, UnsafeVectorOverflow>& jumpsToLink() { return m_assembler.jumpsToLink(); }
    void* unlinkedCode() { return m_assembler.unlinkedCode(); }
    bool canCompact(JumpType jumpType) { return m_assembler.canCompact(jumpType); }
    JumpLinkType computeJumpType(JumpType jumpType, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(jumpType, from, to); }
    JumpLinkType computeJumpType(LinkRecord& record, const uint8_t* from, const uint8_t* to) { return m_assembler.computeJumpType(record, from, to); }
    void recordLinkOffsets(int32_t regionStart, int32_t regionEnd, int32_t offset) {return m_assembler.recordLinkOffsets(regionStart, regionEnd, offset); }
    int jumpSizeDelta(JumpType jumpType, JumpLinkType jumpLinkType) { return m_assembler.jumpSizeDelta(jumpType, jumpLinkType); }
    void link(LinkRecord& record, uint8_t* from, uint8_t* to) { return m_assembler.link(record, from, to); }
73 | |
    // Internal representation of an ARM addressing mode: either
    // base + signed 32-bit offset, or base + (index << scale).
    struct ArmAddress {
        enum AddressType {
            HasOffset,
            HasIndex,
        } type;
        RegisterID base;
        union {
            int32_t offset;
            struct {
                RegisterID index;
                Scale scale;
            };
        } u;

        // Base-plus-offset form (offset defaults to 0).
        explicit ArmAddress(RegisterID base, int32_t offset = 0)
            : type(HasOffset)
            , base(base)
        {
            u.offset = offset;
        }

        // Base-plus-scaled-index form.
        explicit ArmAddress(RegisterID base, RegisterID index, Scale scale = TimesOne)
            : type(HasIndex)
            , base(base)
        {
            u.index = index;
            u.scale = scale;
        }
    };
103 | |
public:
    typedef ARMRegisters::FPDoubleRegisterID FPRegisterID;

    // Pointers are 4 bytes on this target, so pointer-sized indexed
    // accesses scale by four.
    static const Scale ScalePtr = TimesFour;

    // Integer comparison conditions, mapped directly onto ARM condition codes.
    enum RelationalCondition {
        Equal = ARMv7Assembler::ConditionEQ,
        NotEqual = ARMv7Assembler::ConditionNE,
        Above = ARMv7Assembler::ConditionHI,
        AboveOrEqual = ARMv7Assembler::ConditionHS,
        Below = ARMv7Assembler::ConditionLO,
        BelowOrEqual = ARMv7Assembler::ConditionLS,
        GreaterThan = ARMv7Assembler::ConditionGT,
        GreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        LessThan = ARMv7Assembler::ConditionLT,
        LessThanOrEqual = ARMv7Assembler::ConditionLE
    };

    // Conditions tested on the flags produced by flag-setting arithmetic.
    enum ResultCondition {
        Overflow = ARMv7Assembler::ConditionVS,
        Signed = ARMv7Assembler::ConditionMI,
        Zero = ARMv7Assembler::ConditionEQ,
        NonZero = ARMv7Assembler::ConditionNE
    };

    // Conditions for double-precision comparisons.
    enum DoubleCondition {
        // These conditions will only evaluate to true if the comparison is ordered - i.e. neither operand is NaN.
        DoubleEqual = ARMv7Assembler::ConditionEQ,
        DoubleNotEqual = ARMv7Assembler::ConditionVC, // Not the right flag! check for this & handle differently.
        DoubleGreaterThan = ARMv7Assembler::ConditionGT,
        DoubleGreaterThanOrEqual = ARMv7Assembler::ConditionGE,
        DoubleLessThan = ARMv7Assembler::ConditionLO,
        DoubleLessThanOrEqual = ARMv7Assembler::ConditionLS,
        // If either operand is NaN, these conditions always evaluate to true.
        DoubleEqualOrUnordered = ARMv7Assembler::ConditionVS, // Not the right flag! check for this & handle differently.
        DoubleNotEqualOrUnordered = ARMv7Assembler::ConditionNE,
        DoubleGreaterThanOrUnordered = ARMv7Assembler::ConditionHI,
        DoubleGreaterThanOrEqualOrUnordered = ARMv7Assembler::ConditionHS,
        DoubleLessThanOrUnordered = ARMv7Assembler::ConditionLT,
        DoubleLessThanOrEqualOrUnordered = ARMv7Assembler::ConditionLE,
    };

    static const RegisterID stackPointerRegister = ARMRegisters::sp;
    static const RegisterID linkRegister = ARMRegisters::lr;
148 | |
149 | // Integer arithmetic operations: |
150 | // |
151 | // Operations are typically two operand - operation(source, srcDst) |
// For many operations the source may be a TrustedImm32, the srcDst operand
// may often be a memory location (explicitly described using an Address
// object).
155 | |
    // dest = dest + src.
    void add32(RegisterID src, RegisterID dest)
    {
        m_assembler.add(dest, dest, src);
    }

    // dest = dest + imm.
    void add32(TrustedImm32 imm, RegisterID dest)
    {
        add32(imm, dest, dest);
    }

    // dest = dest + *src.m_ptr. Clobbers dataTempRegister and addressTempRegister.
    void add32(AbsoluteAddress src, RegisterID dest)
    {
        load32(src.m_ptr, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    // dest = src + imm. Immediates that cannot be encoded directly are
    // materialized in dataTempRegister first.
    void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add(dest, src, dataTempRegister);
        }
    }

    // dest = op1 + op2.
    void add32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add(dest, op1, op2);
    }

    // *address += imm (read-modify-write). Clobbers dataTempRegister, and
    // addressTempRegister when the immediate is not directly encodable.
    void add32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // dataTempRegister holds the value just loaded, so materialize
            // the immediate in addressTempRegister instead.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest = dest + *src. Clobbers dataTempRegister.
    void add32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        add32(dataTempRegister, dest);
    }

    // *address.m_ptr += imm (read-modify-write on an absolute address).
    // Clobbers dataTempRegister and addressTempRegister.
    void add32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add(dataTempRegister, dataTempRegister, armImm);
        else {
            // dataTempRegister holds the value just loaded, so materialize
            // the immediate in addressTempRegister instead.
            move(imm, addressTempRegister);
            m_assembler.add(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
227 | |
    // dest = address.base + (address.index << address.scale) + address.offset.
    // Clobbers addressTempRegister (and dataTempRegister when the offset is
    // not directly encodable — see add32(TrustedImm32, RegisterID)).
    void getEffectiveAddress(BaseIndex address, RegisterID dest)
    {
        m_assembler.lsl(addressTempRegister, address.index, static_cast<int>(address.scale));
        m_assembler.add(dest, address.base, addressTempRegister);
        if (address.offset)
            add32(TrustedImm32(address.offset), dest);
    }
235 | |
    // 64-bit in-memory add: adds the sign-extended 32-bit immediate to the
    // 64-bit little-endian value at address. Clobbers dataTempRegister and
    // addressTempRegister.
    void add64(TrustedImm32 imm, AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);

        // Low word: add with flags set (add_S) so the carry is available to
        // the adc below.
        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            // addressTempRegister was reused for the immediate; restore the address.
            move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        }
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(0));

        // High word: add the carry plus the sign extension of imm
        // (imm.m_value >> 31 yields 0 or -1).
        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
        m_assembler.adc(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(imm.m_value >> 31));
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt12(4));
    }
255 | |
    // dest = op1 & op2. (Named ARM_and presumably to avoid the C++
    // alternative token "and".)
    void and32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.ARM_and(dest, op1, op2);
    }

    // dest = src & imm; unencodable immediates go via dataTempRegister.
    void and32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.ARM_and(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.ARM_and(dest, src, dataTempRegister);
        }
    }

    // dest = dest & src.
    void and32(RegisterID src, RegisterID dest)
    {
        and32(dest, src, dest);
    }

    // dest = dest & imm.
    void and32(TrustedImm32 imm, RegisterID dest)
    {
        and32(imm, dest, dest);
    }

    // dest = dest & *src. Clobbers dataTempRegister.
    void and32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        and32(dataTempRegister, dest);
    }
287 | |
    // dest = number of leading zero bits in src (single clz instruction).
    void countLeadingZeros32(RegisterID src, RegisterID dest)
    {
        m_assembler.clz(dest, src);
    }
292 | |
    // dest = src << (shiftAmount & 0x1f). Clobbers dataTempRegister.
    void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsl(dest, src, dataTempRegister);
    }

    // dest = src << (imm & 0x1f).
    void lshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsl(dest, src, imm.m_value & 0x1f);
    }

    // dest <<= shiftAmount.
    void lshift32(RegisterID shiftAmount, RegisterID dest)
    {
        lshift32(dest, shiftAmount, dest);
    }

    // dest <<= imm.
    void lshift32(TrustedImm32 imm, RegisterID dest)
    {
        lshift32(dest, imm, dest);
    }
317 | |
    // dest = dest * src. smull produces a 64-bit product; the low 32 bits go
    // to dest and the high 32 bits are discarded into dataTempRegister.
    void mul32(RegisterID src, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, dest, src);
    }

    // dest = src * imm. Clobbers dataTempRegister (immediate, then high word).
    void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        m_assembler.smull(dest, dataTempRegister, src, dataTempRegister);
    }

    // dest = op1 * op2. Clobbers dataTempRegister.
    void mul32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, op1, op2);
    }

    // dest = dest * *src. Clobbers dataTempRegister.
    void mul32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        mul32(dataTempRegister, dest);
    }
339 | |
    // srcDest = -srcDest (two's-complement negation).
    void neg32(RegisterID srcDest)
    {
        m_assembler.neg(srcDest, srcDest);
    }
344 | |
    // dest = dest | src.
    void or32(RegisterID src, RegisterID dest)
    {
        m_assembler.orr(dest, dest, src);
    }

    // dest = dest | *src. Clobbers dataTempRegister.
    void or32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        or32(dataTempRegister, dest);
    }

    // *dest.m_ptr |= src. Clobbers dataTempRegister and addressTempRegister.
    void or32(RegisterID src, AbsoluteAddress dest)
    {
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        load32(addressTempRegister, dataTempRegister);
        or32(src, dataTempRegister);
        store32(dataTempRegister, addressTempRegister);
    }

    // dest = dest | imm.
    void or32(TrustedImm32 imm, RegisterID dest)
    {
        or32(imm, dest, dest);
    }

    // dest = op1 | op2.
    void or32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.orr(dest, op1, op2);
    }

    // dest = src | imm; unencodable immediates go via dataTempRegister.
    void or32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.orr(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.orr(dest, src, dataTempRegister);
        }
    }
384 | |
    // dest = src >> (shiftAmount & 0x1f), arithmetic (sign-preserving, asr).
    // Clobbers dataTempRegister.
    void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.asr(dest, src, dataTempRegister);
    }

    // dest = src >> (imm & 0x1f), arithmetic.
    void rshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.asr(dest, src, imm.m_value & 0x1f);
    }

    void rshift32(RegisterID shiftAmount, RegisterID dest)
    {
        rshift32(dest, shiftAmount, dest);
    }

    void rshift32(TrustedImm32 imm, RegisterID dest)
    {
        rshift32(dest, imm, dest);
    }

    // dest = src >> (shiftAmount & 0x1f), logical (zero-filling, lsr).
    // Clobbers dataTempRegister.
    void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest)
    {
        // Clamp the shift to the range 0..31
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(0x1f);
        ASSERT(armImm.isValid());
        m_assembler.ARM_and(dataTempRegister, shiftAmount, armImm);

        m_assembler.lsr(dest, src, dataTempRegister);
    }

    // dest = src >> (imm & 0x1f), logical.
    void urshift32(RegisterID src, TrustedImm32 imm, RegisterID dest)
    {
        m_assembler.lsr(dest, src, imm.m_value & 0x1f);
    }

    void urshift32(RegisterID shiftAmount, RegisterID dest)
    {
        urshift32(dest, shiftAmount, dest);
    }

    void urshift32(TrustedImm32 imm, RegisterID dest)
    {
        urshift32(dest, imm, dest);
    }
434 | |
    // dest = dest - src.
    void sub32(RegisterID src, RegisterID dest)
    {
        m_assembler.sub(dest, dest, src);
    }

    // dest = dest - imm; unencodable immediates go via dataTempRegister.
    void sub32(TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dest, dest, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub(dest, dest, dataTempRegister);
        }
    }

    // *address -= imm (read-modify-write). Clobbers dataTempRegister, and
    // addressTempRegister when the immediate is not directly encodable.
    void sub32(TrustedImm32 imm, Address address)
    {
        load32(address, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // dataTempRegister holds the value just loaded, so materialize
            // the immediate in addressTempRegister instead.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address);
    }

    // dest = dest - *src. Clobbers dataTempRegister.
    void sub32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        sub32(dataTempRegister, dest);
    }

    // *address.m_ptr -= imm. Clobbers dataTempRegister, and
    // addressTempRegister when the immediate is not directly encodable.
    void sub32(TrustedImm32 imm, AbsoluteAddress address)
    {
        load32(address.m_ptr, dataTempRegister);

        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12OrEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub(dataTempRegister, dataTempRegister, armImm);
        else {
            // dataTempRegister holds the value just loaded, so materialize
            // the immediate in addressTempRegister instead.
            move(imm, addressTempRegister);
            m_assembler.sub(dataTempRegister, dataTempRegister, addressTempRegister);
        }

        store32(dataTempRegister, address.m_ptr);
    }
490 | |
    // dest = dest ^ *src. Clobbers dataTempRegister.
    void xor32(Address src, RegisterID dest)
    {
        load32(src, dataTempRegister);
        xor32(dataTempRegister, dest);
    }

    // dest = op1 ^ op2.
    void xor32(RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.eor(dest, op1, op2);
    }

    // dest = src ^ imm. XOR with -1 is emitted as a single mvn (bitwise not).
    void xor32(TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        if (imm.m_value == -1) {
            m_assembler.mvn(dest, src);
            return;
        }

        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.eor(dest, src, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.eor(dest, src, dataTempRegister);
        }
    }

    // dest = dest ^ src.
    void xor32(RegisterID src, RegisterID dest)
    {
        xor32(dest, src, dest);
    }

    // dest = dest ^ imm, with the same mvn special-case for -1.
    void xor32(TrustedImm32 imm, RegisterID dest)
    {
        if (imm.m_value == -1)
            m_assembler.mvn(dest, dest);
        else
            xor32(imm, dest, dest);
    }
530 | |
531 | |
    // Memory access operations:
    //
    // Loads are of the form load(address, destination) and stores of the form
    // store(source, address). The source for a store may be a TrustedImm32. Address
    // operand objects to loads and store will be implicitly constructed if a
    // register is passed.

    // Internal load dispatch on the resolved addressing mode; public because
    // "using load32;" in template sub-classes needs it to pull in the other
    // public overloads.
    void load32(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldr(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            // Non-negative offsets use the 12-bit immediate encoding.
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldr(dest, address.base, armImm);
        } else {
            // Negative offsets are limited to -255..-1.
            ASSERT(address.u.offset >= -255);
            m_assembler.ldr(dest, address.base, address.u.offset, true, false);
        }
    }
554 | |
private:
    // Halfword (16-bit, zero-extending) load; same offset-encoding rules as
    // load32(ArmAddress, RegisterID).
    void load16(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrh(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrh(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrh(dest, address.base, address.u.offset, true, false);
        }
    }

    // Sign-extending halfword load; only the register-index form is supported.
    void load16Signed(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsh(dest, address.base, address.u.index, address.u.scale);
    }

    // Byte (zero-extending) load.
    void load8(ArmAddress address, RegisterID dest)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.ldrb(dest, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.ldrb(dest, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.ldrb(dest, address.base, address.u.offset, true, false);
        }
    }

    // Sign-extending byte load; only the register-index form is supported.
    void load8Signed(ArmAddress address, RegisterID dest)
    {
        ASSERT(address.type == ArmAddress::HasIndex);
        m_assembler.ldrsb(dest, address.base, address.u.index, address.u.scale);
    }
595 | |
protected:
    // Internal store dispatch; same offset-encoding rules as
    // load32(ArmAddress, RegisterID).
    void store32(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.str(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.str(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.str(src, address.base, address.u.offset, true, false);
        }
    }
610 | |
private:
    // Byte store; same offset-encoding rules as store32(RegisterID, ArmAddress).
    void store8(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strb(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strb(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strb(src, address.base, address.u.offset, true, false);
        }
    }

    // Halfword store.
    void store16(RegisterID src, ArmAddress address)
    {
        if (address.type == ArmAddress::HasIndex)
            m_assembler.strh(src, address.base, address.u.index, address.u.scale);
        else if (address.u.offset >= 0) {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.u.offset);
            ASSERT(armImm.isValid());
            m_assembler.strh(src, address.base, armImm);
        } else {
            ASSERT(address.u.offset >= -255);
            m_assembler.strh(src, address.base, address.u.offset, true, false);
        }
    }
639 | |
public:
    void load32(ImplicitAddress address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    void load32(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // Implemented as an ordinary load32 on this port.
    void load32WithUnalignedHalfWords(BaseIndex address, RegisterID dest)
    {
        load32(setupArmAddress(address), dest);
    }

    // Implemented as an ordinary load16 on this port.
    void load16Unaligned(ImplicitAddress address, RegisterID dest)
    {
        load16(setupArmAddress(address), dest);
    }

    void load16Unaligned(BaseIndex address, RegisterID dest)
    {
        load16(setupArmAddress(address), dest);
    }

    // Load from an absolute address. Clobbers addressTempRegister.
    void load32(const void* address, RegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    // Emits a load that can later be converted (repatched); uses a wide
    // encoding with an 8-bit immediate, so the offset must be in 0..255.
    ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest)
    {
        ConvertibleLoadLabel result(this);
        ASSERT(address.offset >= 0 && address.offset <= 255);
        m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset);
        return result;
    }
679 | |
    void load8(ImplicitAddress address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    // Sign-extending byte load with an immediate-offset address is not
    // supported on this port.
    void load8Signed(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }

    void load8(BaseIndex address, RegisterID dest)
    {
        load8(setupArmAddress(address), dest);
    }

    void load8Signed(BaseIndex address, RegisterID dest)
    {
        load8Signed(setupArmAddress(address), dest);
    }

    // Loads via a patchable 32-bit offset (moveWithPatch) materialized in
    // dataTempRegister; returns the label used to repatch that offset.
    DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        load32(ArmAddress(address.base, dataTempRegister), dest);
        return label;
    }

    // Compact patchable load; the offset must satisfy
    // isCompactPtrAlignedAddressOffset().
    DataLabelCompact load32WithCompactAddressOffsetPatch(Address address, RegisterID dest)
    {
        padBeforePatch();

        RegisterID base = address.base;

        DataLabelCompact label(this);
        ASSERT(isCompactPtrAlignedAddressOffset(address.offset));

        m_assembler.ldr(dest, base, address.offset, true, false);
        return label;
    }

    void load16(BaseIndex address, RegisterID dest)
    {
        m_assembler.ldrh(dest, makeBaseIndexBase(address), address.index, address.scale);
    }

    void load16Signed(BaseIndex address, RegisterID dest)
    {
        load16Signed(setupArmAddress(address), dest);
    }

    // Halfword load; falls back to a register offset (in dataTempRegister)
    // when the offset does not fit the 12-bit immediate field.
    void load16(ImplicitAddress address, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeUInt12(address.offset);
        if (armImm.isValid())
            m_assembler.ldrh(dest, address.base, armImm);
        else {
            move(TrustedImm32(address.offset), dataTempRegister);
            m_assembler.ldrh(dest, address.base, dataTempRegister);
        }
    }

    // Sign-extending halfword load with an immediate-offset address is not
    // supported on this port.
    void load16Signed(ImplicitAddress, RegisterID)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
745 | |
    // Stores via a patchable 32-bit offset (moveWithPatch) materialized in
    // dataTempRegister; returns the label used to repatch that offset.
    DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address)
    {
        DataLabel32 label = moveWithPatch(TrustedImm32(address.offset), dataTempRegister);
        store32(src, ArmAddress(address.base, dataTempRegister));
        return label;
    }

    void store32(RegisterID src, ImplicitAddress address)
    {
        store32(src, setupArmAddress(address));
    }

    void store32(RegisterID src, BaseIndex address)
    {
        store32(src, setupArmAddress(address));
    }

    // Immediate stores materialize the value in dataTempRegister first.
    void store32(TrustedImm32 imm, ImplicitAddress address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    void store32(TrustedImm32 imm, BaseIndex address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, setupArmAddress(address));
    }

    // Absolute-address store. Clobbers addressTempRegister.
    void store32(RegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.str(src, addressTempRegister, ARMThumbImmediate::makeUInt16(0));
    }

    // Absolute-address immediate store. Clobbers dataTempRegister and
    // addressTempRegister.
    void store32(TrustedImm32 imm, const void* address)
    {
        move(imm, dataTempRegister);
        store32(dataTempRegister, address);
    }

    void store8(RegisterID src, BaseIndex address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(RegisterID src, Address address)
    {
        store8(src, setupArmAddress(address));
    }

    void store8(TrustedImm32 imm, Address address)
    {
        move(imm, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store8(RegisterID src, void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        store8(src, ArmAddress(addressTempRegister, 0));
    }

    void store8(TrustedImm32 imm, void* address)
    {
        move(imm, dataTempRegister);
        store8(dataTempRegister, address);
    }

    void store16(RegisterID src, BaseIndex address)
    {
        store16(src, setupArmAddress(address));
    }
819 | |
    // Transfers the two 32-bit halves of a double register into dest1/dest2.
    // Possibly clobbers src, but not on this architecture.
    void moveDoubleToInts(FPRegisterID src, RegisterID dest1, RegisterID dest2)
    {
        m_assembler.vmov(dest1, dest2, src);
    }

    // Recombines two 32-bit general-purpose registers into a double register.
    // scratch is unused on this architecture.
    void moveIntsToDouble(RegisterID src1, RegisterID src2, FPRegisterID dest, FPRegisterID scratch)
    {
        UNUSED_PARAM(scratch);
        m_assembler.vmov(dest, src1, src2);
    }
831 | |
832 | #if ENABLE(JIT_CONSTANT_BLINDING) |
833 | static bool shouldBlindForSpecificArch(uint32_t value) |
834 | { |
835 | ARMThumbImmediate immediate = ARMThumbImmediate::makeEncodedImm(value); |
836 | |
837 | // Couldn't be encoded as an immediate, so assume it's untrusted. |
838 | if (!immediate.isValid()) |
839 | return true; |
840 | |
841 | // If we can encode the immediate, we have less than 16 attacker |
842 | // controlled bits. |
843 | if (immediate.isEncodedImm()) |
844 | return false; |
845 | |
846 | // Don't let any more than 12 bits of an instruction word |
847 | // be controlled by an attacker. |
848 | return !immediate.isUInt12(); |
849 | } |
850 | #endif |
851 | |
    // Floating-point operations:

    // Feature queries; this backend reports support for all of these.
    static bool supportsFloatingPoint() { return true; }
    static bool supportsFloatingPointTruncate() { return true; }
    static bool supportsFloatingPointSqrt() { return true; }
    static bool supportsFloatingPointAbs() { return true; }
858 | |
    // Double load. Clobbers addressTempRegister when the offset cannot be
    // encoded in the vldr instruction.
    void loadDouble(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        // If the offset is misaligned or out of range, compute the full
        // address into addressTempRegister instead.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.vldr(dest, base, offset);
    }

    // Single-precision load into the single-precision alias of dest.
    // Same offset handling as loadDouble above.
    void loadFloat(ImplicitAddress address, FPRegisterID dest)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.flds(ARMRegisters::asSingle(dest), base, offset);
    }
888 | |
    // Load a double from base + (index << scale) + offset.
    // Forms the effective address in addressTempRegister (clobbered).
    void loadDouble(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadDouble(Address(addressTempRegister, address.offset), dest);
    }
896 | |
    // Load a float from base + (index << scale) + offset.
    // Forms the effective address in addressTempRegister (clobbered).
    void loadFloat(BaseIndex address, FPRegisterID dest)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        loadFloat(Address(addressTempRegister, address.offset), dest);
    }
904 | |
    // Register-to-register double move; emits nothing when src == dest.
    void moveDouble(FPRegisterID src, FPRegisterID dest)
    {
        if (src != dest)
            m_assembler.vmov(dest, src);
    }
910 | |
    // Load a double from an absolute address (clobbers addressTempRegister).
    void loadDouble(const void* address, FPRegisterID dest)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        m_assembler.vldr(dest, addressTempRegister, 0);
    }
916 | |
917 | void storeDouble(FPRegisterID src, ImplicitAddress address) |
918 | { |
919 | RegisterID base = address.base; |
920 | int32_t offset = address.offset; |
921 | |
922 | // Arm vfp addresses can be offset by a 9-bit ones-comp immediate, left shifted by 2. |
923 | if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) { |
924 | add32(TrustedImm32(offset), base, addressTempRegister); |
925 | base = addressTempRegister; |
926 | offset = 0; |
927 | } |
928 | |
929 | m_assembler.vstr(src, base, offset); |
930 | } |
931 | |
    // Store a single-precision float to [address.base + address.offset].
    // May clobber addressTempRegister when the offset is unencodable.
    void storeFloat(FPRegisterID src, ImplicitAddress address)
    {
        RegisterID base = address.base;
        int32_t offset = address.offset;

        // Arm vfp addresses can be offset by an 8-bit word-count immediate (+/-(255 * 4)), word-aligned.
        if ((offset & 3) || (offset > (255 * 4)) || (offset < -(255 * 4))) {
            add32(TrustedImm32(offset), base, addressTempRegister);
            base = addressTempRegister;
            offset = 0;
        }

        m_assembler.fsts(ARMRegisters::asSingle(src), base, offset);
    }
946 | |
    // Store a double to an absolute address (clobbers addressTempRegister).
    void storeDouble(FPRegisterID src, const void* address)
    {
        move(TrustedImmPtr(address), addressTempRegister);
        storeDouble(src, addressTempRegister);
    }

    // Store a double to base + (index << scale) + offset.
    // Forms the effective address in addressTempRegister (clobbered).
    void storeDouble(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeDouble(src, Address(addressTempRegister, address.offset));
    }
960 | |
    // Store a float to base + (index << scale) + offset.
    // Forms the effective address in addressTempRegister (clobbered).
    void storeFloat(FPRegisterID src, BaseIndex address)
    {
        move(address.index, addressTempRegister);
        lshift32(TrustedImm32(address.scale), addressTempRegister);
        add32(address.base, addressTempRegister);
        storeFloat(src, Address(addressTempRegister, address.offset));
    }
968 | |
    // dest += src.
    void addDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vadd(dest, dest, src);
    }

    // dest += [src]; loads through fpTempRegister (clobbered).
    void addDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        addDouble(fpTempRegister, dest);
    }

    // dest = op1 + op2.
    void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vadd(dest, op1, op2);
    }

    // dest += *address.m_ptr; clobbers fpTempRegister (and addressTempRegister via loadDouble).
    void addDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        loadDouble(address.m_ptr, fpTempRegister);
        m_assembler.vadd(dest, dest, fpTempRegister);
    }
990 | |
    // dest /= src.
    void divDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, dest, src);
    }

    // dest = op1 / op2.
    void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vdiv(dest, op1, op2);
    }

    // dest /= [src]; loads through fpTempRegister (clobbered).
    void divDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        divDouble(fpTempRegister, dest);
    }
1006 | |
    // dest -= src.
    void subDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsub(dest, dest, src);
    }

    // dest -= [src]; loads through fpTempRegister (clobbered).
    void subDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        subDouble(fpTempRegister, dest);
    }

    // dest = op1 - op2.
    void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vsub(dest, op1, op2);
    }
1022 | |
    // dest *= src.
    void mulDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vmul(dest, dest, src);
    }

    // dest *= [src]; loads through fpTempRegister (clobbered).
    void mulDouble(Address src, FPRegisterID dest)
    {
        loadDouble(src, fpTempRegister);
        mulDouble(fpTempRegister, dest);
    }

    // dest = op1 * op2.
    void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest)
    {
        m_assembler.vmul(dest, op1, op2);
    }
1038 | |
    // dest = sqrt(src).
    void sqrtDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vsqrt(dest, src);
    }

    // dest = fabs(src).
    void absDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vabs(dest, src);
    }

    // dest = -src.
    void negateDouble(FPRegisterID src, FPRegisterID dest)
    {
        m_assembler.vneg(dest, src);
    }
1053 | |
    // dest = (double)(int32_t)src; stages the integer through fpTempRegister (clobbered).
    void convertInt32ToDouble(RegisterID src, FPRegisterID dest)
    {
        m_assembler.vmov(fpTempRegister, src, src);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    // dest = (double)(int32_t)[address]; clobbers dataTempRegister and fpTempRegister.
    void convertInt32ToDouble(Address address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }

    // dest = (double)(int32_t)*address.m_ptr; clobbers dataTempRegister and fpTempRegister.
    void convertInt32ToDouble(AbsoluteAddress address, FPRegisterID dest)
    {
        // Fixme: load directly into the fpr!
        load32(address.m_ptr, dataTempRegister);
        m_assembler.vmov(fpTempRegister, dataTempRegister, dataTempRegister);
        m_assembler.vcvt_signedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }
1075 | |
    // dest = (double)(uint32_t)src; the scratch register is unused on this port.
    void convertUInt32ToDouble(RegisterID src, FPRegisterID dest, RegisterID /*scratch*/)
    {
        m_assembler.vmov(fpTempRegister, src, src);
        m_assembler.vcvt_unsignedToFloatingPoint(dest, fpTempRegisterAsSingle());
    }
1081 | |
    // Widen the single-precision value in src to a double in dst.
    void convertFloatToDouble(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtds(dst, ARMRegisters::asSingle(src));
    }

    // Narrow the double in src to a single-precision value in dst.
    void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst)
    {
        m_assembler.vcvtsd(ARMRegisters::asSingle(dst), src);
    }
1091 | |
    // Compare two doubles and branch on the given condition.
    // VCMP sets the FP status flags; VMRS copies them to APSR so the
    // integer conditional branches below can test them. The VS (overflow
    // set) condition here means "unordered", i.e. at least one operand was NaN.
    Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right)
    {
        m_assembler.vcmp(left, right);
        m_assembler.vmrs();

        if (cond == DoubleNotEqual) {
            // ConditionNE jumps if NotEqual *or* unordered - force the unordered cases not to jump.
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump result = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            return result;
        }
        if (cond == DoubleEqualOrUnordered) {
            Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
            Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
            unordered.link(this);
            // We get here if either unordered or equal.
            Jump result = jump();
            notEqual.link(this);
            return result;
        }
        return makeBranch(cond);
    }
1115 | |
    enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful };
    // Truncate src to a signed 32-bit integer in dest, branching on
    // success or failure as requested. Clobbers fpTempRegister and
    // dataTempRegister. Failure is detected via VCVT's saturation:
    // out-of-range inputs clamp to 0x80000000 / 0x7fffffff.
    Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        // Convert into dest.
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Calculate 2x dest. If the value potentially underflowed, it will have
        // clamped to 0x80000000, so 2x dest is zero in this case. In the case of
        // overflow the result will be equal to -2.
        Jump underflow = branchAdd32(Zero, dest, dest, dataTempRegister);
        Jump noOverflow = branch32(NotEqual, dataTempRegister, TrustedImm32(-2));

        // For BranchIfTruncateSuccessful, we branch if 'noOverflow' jumps.
        underflow.link(this);
        if (branchType == BranchIfTruncateSuccessful)
            return noOverflow;

        // We'll reach the current point in the code on failure, so plant a
        // jump here & link the success case.
        Jump failure = jump();
        noOverflow.link(this);
        return failure;
    }
1140 | |
    // Truncate src to an unsigned 32-bit integer in dest, branching on
    // success or failure as requested. Clobbers fpTempRegister.
    // NOTE(review): this uses the *signed* conversion, so results are only
    // accepted in [0, 0x7ffffffe] - inputs in [0x7fffffff, 0xffffffff] are
    // reported as failures. Appears to be a deliberate conservative
    // limitation; confirm against callers before changing.
    Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // 0x7fffffff is the signed-saturation value, i.e. possible overflow.
        Jump overflow = branch32(Equal, dest, TrustedImm32(0x7fffffff));
        Jump success = branch32(GreaterThanOrEqual, dest, TrustedImm32(0));
        overflow.link(this);

        if (branchType == BranchIfTruncateSuccessful)
            return success;

        Jump failure = jump();
        success.link(this);
        return failure;
    }
1157 | |
    // Result is undefined if the value is outside of the integer range.
    // Truncate-toward-zero src into dest via the signed VCVT; clobbers fpTempRegister.
    void truncateDoubleToInt32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }

    // As above but using the unsigned conversion; clobbers fpTempRegister.
    void truncateDoubleToUint32(FPRegisterID src, RegisterID dest)
    {
        m_assembler.vcvt_floatingPointToUnsigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());
    }
1170 | |
    // Convert 'src' to an integer, and places the resulting 'dest'.
    // If the result is not representable as a 32 bit value, branch.
    // May also branch for some values that are representable in 32 bits
    // (specifically, in this case, 0).
    // Clobbers fpTempRegister; the trailing FPRegisterID scratch is unused on this port.
    void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID)
    {
        m_assembler.vcvt_floatingPointToSigned(fpTempRegisterAsSingle(), src);
        m_assembler.vmov(dest, fpTempRegisterAsSingle());

        // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump.
        m_assembler.vcvt_signedToFloatingPoint(fpTempRegister, fpTempRegisterAsSingle());
        failureCases.append(branchDouble(DoubleNotEqualOrUnordered, src, fpTempRegister));

        // If the result is zero, it might have been -0.0, and the double comparison won't catch this!
        failureCases.append(branchTest32(Zero, dest));
    }
1187 | |
    // Branch if reg compares not-equal to +0.0 (NaN does not branch).
    // The second FPRegisterID scratch is unused on this port.
    Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        // VS = unordered (NaN): suppress the NE branch for that case.
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump result = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        return result;
    }

    // Branch if reg is +/-0.0 or NaN.
    // The second FPRegisterID scratch is unused on this port.
    Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID)
    {
        m_assembler.vcmpz(reg);
        m_assembler.vmrs();
        Jump unordered = makeBranch(ARMv7Assembler::ConditionVS);
        Jump notEqual = makeBranch(ARMv7Assembler::ConditionNE);
        unordered.link(this);
        // We get here if either unordered or equal.
        Jump result = jump();
        notEqual.link(this);
        return result;
    }
1210 | |
1211 | // Stack manipulation operations: |
1212 | // |
1213 | // The ABI is assumed to provide a stack abstraction to memory, |
1214 | // containing machine word sized units of data. Push and pop |
1215 | // operations add and remove a single register sized unit of data |
1216 | // to or from the stack. Peek and poke operations read or write |
1217 | // values on the stack, without moving the current stack position. |
1218 | |
    // Pop one word off the stack into dest.
    void pop(RegisterID dest)
    {
        // load postindexed with writeback (the original comment said "store";
        // this is an ldr that bumps sp by 4 after the access)
        m_assembler.ldr(dest, ARMRegisters::sp, 4 /*sizeof(void*)*/, false, true);
    }
1224 | |
    // Push one register-sized word onto the stack.
    void push(RegisterID src)
    {
        // store preindexed with writeback: sp is decremented by 4 before the store
        m_assembler.str(src, ARMRegisters::sp, -4 /*sizeof(void*)*/, true, true);
    }

    // Push the word at 'address'; clobbers dataTempRegister.
    void push(Address address)
    {
        load32(address, dataTempRegister);
        push(dataTempRegister);
    }

    // Push an immediate; clobbers dataTempRegister.
    void push(TrustedImm32 imm)
    {
        move(imm, dataTempRegister);
        push(dataTempRegister);
    }
1242 | |
1243 | // Register move operations: |
1244 | // |
1245 | // Move values in registers. |
1246 | |
1247 | void move(TrustedImm32 imm, RegisterID dest) |
1248 | { |
1249 | uint32_t value = imm.m_value; |
1250 | |
1251 | ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(value); |
1252 | |
1253 | if (armImm.isValid()) |
1254 | m_assembler.mov(dest, armImm); |
1255 | else if ((armImm = ARMThumbImmediate::makeEncodedImm(~value)).isValid()) |
1256 | m_assembler.mvn(dest, armImm); |
1257 | else { |
1258 | m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(value)); |
1259 | if (value & 0xffff0000) |
1260 | m_assembler.movt(dest, ARMThumbImmediate::makeUInt16(value >> 16)); |
1261 | } |
1262 | } |
1263 | |
    // Register-to-register move; emits nothing when src == dest.
    void move(RegisterID src, RegisterID dest)
    {
        if (src != dest)
            m_assembler.mov(dest, src);
    }

    // Pointer immediates reuse the 32-bit immediate path (32-bit platform).
    void move(TrustedImmPtr imm, RegisterID dest)
    {
        move(TrustedImm32(imm), dest);
    }
1274 | |
    // Exchange the contents of reg1 and reg2; clobbers dataTempRegister.
    void swap(RegisterID reg1, RegisterID reg2)
    {
        move(reg1, dataTempRegister);
        move(reg2, reg1);
        move(dataTempRegister, reg2);
    }
1281 | |
    // On a 32-bit platform pointer width equals 32 bits, so both
    // extensions reduce to a plain move.
    void signExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }

    void zeroExtend32ToPtr(RegisterID src, RegisterID dest)
    {
        move(src, dest);
    }
1291 | |
    // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc.
    // Relies on the ARM condition-code encoding pairing each condition with
    // its inverse in adjacent values (differing only in the low bit).
    static RelationalCondition invert(RelationalCondition cond)
    {
        return static_cast<RelationalCondition>(cond ^ 1);
    }
1297 | |
    // Emit a single no-op instruction.
    void nop()
    {
        m_assembler.nop();
    }
1302 | |
    // Patch already-emitted code: overwrite the instruction(s) at
    // instructionStart with a jump to destination.
    static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination)
    {
        ARMv7Assembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation());
    }

    // Upper bound (in bytes) of the code a replaceWithJump may write.
    static ptrdiff_t maxJumpReplacementSize()
    {
        return ARMv7Assembler::maxJumpReplacementSize();
    }
1312 | |
1313 | // Forwards / external control flow operations: |
1314 | // |
1315 | // This set of jump and conditional branch operations return a Jump |
1316 | // object which may linked at a later point, allow forwards jump, |
1317 | // or jumps that will require external linkage (after the code has been |
1318 | // relocated). |
1319 | // |
    // For branches, signed <, >, <= and >= are denoted as l, g, le, and ge
    // respectively, for unsigned comparisons the names b, a, be, and ae are
    // used (representing the names 'below' and 'above').
    //
    // Operands to the comparison are provided in the expected order, e.g.
    // jle32(reg1, TrustedImm32(5)) will branch if the value held in reg1, when
    // treated as a signed 32bit value, is less than or equal to 5.
1327 | // |
1328 | // jz and jnz test whether the first operand is equal to zero, and take |
1329 | // an optional second operand of a mask under which to perform the test. |
1330 | private: |
1331 | |
1332 | // Should we be using TEQ for equal/not-equal? |
1333 | void compare32(RegisterID left, TrustedImm32 right) |
1334 | { |
1335 | int32_t imm = right.m_value; |
1336 | if (!imm) |
1337 | m_assembler.tst(left, left); |
1338 | else { |
1339 | ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm); |
1340 | if (armImm.isValid()) |
1341 | m_assembler.cmp(left, armImm); |
1342 | else if ((armImm = ARMThumbImmediate::makeEncodedImm(-imm)).isValid()) |
1343 | m_assembler.cmn(left, armImm); |
1344 | else { |
1345 | move(TrustedImm32(imm), dataTempRegister); |
1346 | m_assembler.cmp(left, dataTempRegister); |
1347 | } |
1348 | } |
1349 | } |
1350 | |
    // Set the condition flags for reg & mask. A mask of -1 tests the whole
    // register; an unencodable mask is staged through dataTempRegister (clobbered).
    void test32(RegisterID reg, TrustedImm32 mask)
    {
        int32_t imm = mask.m_value;

        if (imm == -1)
            m_assembler.tst(reg, reg);
        else {
            ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm);
            if (armImm.isValid())
                m_assembler.tst(reg, armImm);
            else {
                move(mask, dataTempRegister);
                m_assembler.tst(reg, dataTempRegister);
            }
        }
    }
1367 | |
1368 | public: |
    // Compare left to right and branch on cond. Memory operands are loaded
    // into a temp first; overloads taking an immediate right-hand side load
    // into addressTempRegister because the reg/imm comparison may itself
    // need dataTempRegister.
    Jump branch32(RelationalCondition cond, RegisterID left, RegisterID right)
    {
        m_assembler.cmp(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch32(RelationalCondition cond, RegisterID left, Address right)
    {
        load32(right, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }

    Jump branch32(RelationalCondition cond, Address left, RegisterID right)
    {
        load32(left, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
1406 | |
    // As branch32(BaseIndex, imm) but loads via two halfword accesses, so the
    // address need only be 2-byte aligned.
    Jump branch32WithUnalignedHalfWords(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32WithUnalignedHalfWords(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
1413 | |
    // Compare the word at an absolute address to right and branch on cond.
    Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right)
    {
        load32(left.m_ptr, dataTempRegister);
        return branch32(cond, dataTempRegister, right);
    }

    Jump branch32(RelationalCondition cond, AbsoluteAddress left, TrustedImm32 right)
    {
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load32(left.m_ptr, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
1426 | |
    // 8-bit compare-and-branch. The immediate must fit in a byte; the register
    // is assumed to already hold a zero-extended byte value.
    Jump branch8(RelationalCondition cond, RegisterID left, TrustedImm32 right)
    {
        compare32(left, right);
        return Jump(makeBranch(cond));
    }

    Jump branch8(RelationalCondition cond, Address left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        // use addressTempRegister in case the branch8 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch8(cond, addressTempRegister, right);
    }

    Jump branch8(RelationalCondition cond, BaseIndex left, TrustedImm32 right)
    {
        ASSERT(!(0xffffff00 & right.m_value));
        // use addressTempRegister in case the branch32 we call uses dataTempRegister. :-/
        load8(left, addressTempRegister);
        return branch32(cond, addressTempRegister, right);
    }
1448 | |
    // Branch on the result of reg & mask (default mask -1 tests the whole
    // register). Memory operands load into addressTempRegister because the
    // register/immediate test may itself need dataTempRegister.
    Jump branchTest32(ResultCondition cond, RegisterID reg, RegisterID mask)
    {
        m_assembler.tst(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1))
    {
        test32(reg, mask);
        return Jump(makeBranch(cond));
    }

    Jump branchTest32(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest32(ResultCondition cond, BaseIndex address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest32 we call uses dataTempRegister. :-/
        load32(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }
1474 | |
    // Branch on byte-at-address & mask; clobbers addressTempRegister.
    Jump branchTest8(ResultCondition cond, Address address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        load8(address, addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }

    Jump branchTest8(ResultCondition cond, AbsoluteAddress address, TrustedImm32 mask = TrustedImm32(-1))
    {
        // use addressTempRegister in case the branchTest8 we call uses dataTempRegister. :-/
        move(TrustedImmPtr(address.m_ptr), addressTempRegister);
        load8(Address(addressTempRegister), addressTempRegister);
        return branchTest32(cond, addressTempRegister, mask);
    }
1489 | |
    // Indirect jump to the address held in target.
    void jump(RegisterID target)
    {
        m_assembler.bx(target);
    }

    // Address is a memory location containing the address to jump to
    void jump(Address address)
    {
        load32(address, dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }

    // Jump to the address stored at an absolute location; clobbers dataTempRegister.
    void jump(AbsoluteAddress address)
    {
        move(TrustedImmPtr(address.m_ptr), dataTempRegister);
        load32(Address(dataTempRegister), dataTempRegister);
        m_assembler.bx(dataTempRegister);
    }
1508 | |
1509 | |
1510 | // Arithmetic control flow operations: |
1511 | // |
1512 | // This set of conditional branch operations branch based |
1513 | // on the result of an arithmetic operation. The operation |
1514 | // is performed as normal, storing the result. |
1515 | // |
1516 | // * jz operations branch if the result is zero. |
1517 | // * jo operations branch if the (signed) arithmetic |
1518 | // operation caused an overflow to occur. |
1519 | |
    // dest = op1 + op2, setting flags (add_S), then branch on cond.
    Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.add_S(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    // dest = op1 + imm; an unencodable immediate is staged through
    // dataTempRegister (clobbered).
    Jump branchAdd32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dest, op1, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.add_S(dest, op1, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Two-operand convenience forms: dest += src / dest += imm.
    Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchAdd32(cond, dest, src, dest);
    }

    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchAdd32(cond, dest, imm, dest);
    }
1547 | |
    // Add imm to the word at an absolute address (read-modify-write),
    // setting flags, then branch on cond. Clobbers both dataTempRegister
    // and addressTempRegister; note the careful reload when the immediate
    // itself needs addressTempRegister.
    Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest)
    {
        // Move the high bits of the address into addressTempRegister,
        // and load the value into dataTempRegister.
        move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        m_assembler.ldr(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

        // Do the add.
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.add_S(dataTempRegister, dataTempRegister, armImm);
        else {
            // If the operand does not fit into an immediate then load it temporarily
            // into addressTempRegister; since we're overwriting addressTempRegister
            // we'll need to reload it with the high bits of the address afterwards.
            move(imm, addressTempRegister);
            m_assembler.add_S(dataTempRegister, dataTempRegister, addressTempRegister);
            move(TrustedImmPtr(dest.m_ptr), addressTempRegister);
        }

        // Store the result.
        m_assembler.str(dataTempRegister, addressTempRegister, ARMThumbImmediate::makeUInt16(0));

        return Jump(makeBranch(cond));
    }
1573 | |
    // dest = src1 * src2, branching on cond. Overflow is detected from the
    // 64-bit SMULL result: the high word (dataTempRegister) must equal the
    // sign-extension of the low word. Clobbers dataTempRegister and, for the
    // Overflow case, addressTempRegister.
    Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest)
    {
        m_assembler.smull(dest, dataTempRegister, src1, src2);

        if (cond == Overflow) {
            m_assembler.asr(addressTempRegister, dest, 31);
            return branch32(NotEqual, addressTempRegister, dataTempRegister);
        }

        return branchTest32(cond, dest);
    }

    // dest *= src.
    Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchMul32(cond, src, dest, dest);
    }

    // dest = imm * src; the immediate is staged through dataTempRegister (clobbered).
    Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest)
    {
        move(imm, dataTempRegister);
        return branchMul32(cond, dataTempRegister, src, dest);
    }
1596 | |
    // srcDest = 0 - srcDest (flag-setting negate), then branch on cond.
    Jump branchNeg32(ResultCondition cond, RegisterID srcDest)
    {
        ARMThumbImmediate zero = ARMThumbImmediate::makeUInt12(0);
        m_assembler.sub_S(srcDest, zero, srcDest);
        return Jump(makeBranch(cond));
    }
1603 | |
    // dest |= src (flag-setting), then branch on cond.
    Jump branchOr32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        m_assembler.orr_S(dest, dest, src);
        return Jump(makeBranch(cond));
    }
1609 | |
    // dest = op1 - op2, setting flags (sub_S), then branch on cond.
    Jump branchSub32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest)
    {
        m_assembler.sub_S(dest, op1, op2);
        return Jump(makeBranch(cond));
    }

    // dest = op1 - imm; an unencodable immediate is staged through
    // dataTempRegister (clobbered).
    Jump branchSub32(ResultCondition cond, RegisterID op1, TrustedImm32 imm, RegisterID dest)
    {
        ARMThumbImmediate armImm = ARMThumbImmediate::makeEncodedImm(imm.m_value);
        if (armImm.isValid())
            m_assembler.sub_S(dest, op1, armImm);
        else {
            move(imm, dataTempRegister);
            m_assembler.sub_S(dest, op1, dataTempRegister);
        }
        return Jump(makeBranch(cond));
    }

    // Two-operand convenience forms: dest -= src / dest -= imm.
    Jump branchSub32(ResultCondition cond, RegisterID src, RegisterID dest)
    {
        return branchSub32(cond, dest, src, dest);
    }

    Jump branchSub32(ResultCondition cond, TrustedImm32 imm, RegisterID dest)
    {
        return branchSub32(cond, dest, imm, dest);
    }
1637 | |
    // Jump to (pc-after-sequence) + (index << scale): a computed jump into a
    // table of code laid out immediately after this sequence. Clobbers
    // dataTempRegister.
    void relativeTableJump(RegisterID index, int scale)
    {
        ASSERT(scale >= 0 && scale <= 31);

        // dataTempRegister will point after the jump if index register contains zero
        // (the +9 accounts for the remaining instruction bytes plus the Thumb bit
        // relative to the value read from pc -- NOTE(review): encoding-dependent,
        // do not reorder the instructions below).
        move(ARMRegisters::pc, dataTempRegister);
        m_assembler.add(dataTempRegister, dataTempRegister, ARMThumbImmediate::makeEncodedImm(9));

        ShiftTypeAndAmount shift(SRType_LSL, scale);
        m_assembler.add(dataTempRegister, dataTempRegister, index, shift);
        jump(dataTempRegister);
    }
1650 | |
1651 | // Miscellaneous operations: |
1652 | |
    // Emit a BKPT instruction carrying the given immediate.
    void breakpoint(uint8_t imm = 0)
    {
        m_assembler.bkpt(imm);
    }
1657 | |
    // Emit a linkable near call: a fixed-width (patchable) placeholder move
    // into dataTempRegister followed by BLX through it. The target is filled
    // in at link time.
    ALWAYS_INLINE Call nearCall()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::LinkableNear);
    }
1663 | |
    // Emit a linkable call via a fixed-width (patchable) placeholder move;
    // the target is filled in at link time.
    ALWAYS_INLINE Call call()
    {
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::Linkable);
    }

    // Call the address held in target.
    ALWAYS_INLINE Call call(RegisterID target)
    {
        return Call(m_assembler.blx(target), Call::None);
    }

    // Call the address stored at 'address'; clobbers dataTempRegister.
    ALWAYS_INLINE Call call(Address address)
    {
        load32(address, dataTempRegister);
        return Call(m_assembler.blx(dataTempRegister), Call::None);
    }
1680 | |
    // Return by branching to the link register.
    ALWAYS_INLINE void ret()
    {
        m_assembler.bx(linkRegister);
    }
1685 | |
    // dest = (left <cond> right) ? 1 : 0. The IT block predicates the two
    // MOVs so exactly one executes: 1 on the condition, 0 on its inverse.
    void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest)
    {
        m_assembler.cmp(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // As above with the left operand loaded from memory; clobbers dataTempRegister.
    void compare32(RelationalCondition cond, Address left, RegisterID right, RegisterID dest)
    {
        load32(left, dataTempRegister);
        compare32(cond, dataTempRegister, right, dest);
    }

    // Byte variant; loads through addressTempRegister (clobbered).
    void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest)
    {
        load8(left, addressTempRegister);
        compare32(cond, addressTempRegister, right, dest);
    }

    // Immediate right-hand side; may clobber dataTempRegister via compare32(reg, imm).
    void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest)
    {
        compare32(left, right);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1713 | |
    // FIXME:
    // The mask should be optional... paerhaps the argument order should be
    // dest-src, operations always have a dest? ... possibly not true, considering
    // asm ops like test, or pseudo ops like pop().
    // dest = ([address] & mask <cond>) ? 1 : 0; clobbers dataTempRegister.
    void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load32(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }

    // Byte variant of the above; clobbers dataTempRegister.
    void test8(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest)
    {
        load8(address, dataTempRegister);
        test32(dataTempRegister, mask);
        m_assembler.it(armV7Condition(cond), false);
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(1));
        m_assembler.mov(dest, ARMThumbImmediate::makeUInt16(0));
    }
1735 | |
    // Moves imm into dst using the fixed-width movT3+movt pair so the
    // 32-bit immediate can later be repatched in place; returns a label
    // locating the encoding.
    ALWAYS_INLINE DataLabel32 moveWithPatch(TrustedImm32 imm, RegisterID dst)
    {
        padBeforePatch();
        moveFixedWidthEncoding(imm, dst);
        return DataLabel32(this);
    }
1742 | |
    // Pointer-width variant of moveWithPatch(); on ARMv7 a pointer is
    // 32 bits, so the immediate is simply funneled through TrustedImm32.
    ALWAYS_INLINE DataLabelPtr moveWithPatch(TrustedImmPtr imm, RegisterID dst)
    {
        padBeforePatch();
        moveFixedWidthEncoding(TrustedImm32(imm), dst);
        return DataLabelPtr(this);
    }
1749 | |
    // Compares left against a patchable pointer constant (initially
    // initialRightValue, materialized into dataTempRegister) and branches
    // on cond. dataLabel is set so the constant can be repatched later.
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, RegisterID left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, left, dataTempRegister);
    }
1755 | |
    // As above, but the left-hand value is loaded from memory. The load
    // targets addressTempRegister because dataTempRegister is reserved for
    // the patchable constant emitted by moveWithPatch().
    ALWAYS_INLINE Jump branchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0))
    {
        load32(left, addressTempRegister);
        dataLabel = moveWithPatch(initialRightValue, dataTempRegister);
        return branch32(cond, addressTempRegister, dataTempRegister);
    }
1762 | |
1763 | PatchableJump patchableBranchPtr(RelationalCondition cond, Address left, TrustedImmPtr right = TrustedImmPtr(0)) |
1764 | { |
1765 | m_makeJumpPatchable = true; |
1766 | Jump result = branch32(cond, left, TrustedImm32(right)); |
1767 | m_makeJumpPatchable = false; |
1768 | return PatchableJump(result); |
1769 | } |
1770 | |
1771 | PatchableJump patchableBranchTest32(ResultCondition cond, RegisterID reg, TrustedImm32 mask = TrustedImm32(-1)) |
1772 | { |
1773 | m_makeJumpPatchable = true; |
1774 | Jump result = branchTest32(cond, reg, mask); |
1775 | m_makeJumpPatchable = false; |
1776 | return PatchableJump(result); |
1777 | } |
1778 | |
1779 | PatchableJump patchableBranch32(RelationalCondition cond, RegisterID reg, TrustedImm32 imm) |
1780 | { |
1781 | m_makeJumpPatchable = true; |
1782 | Jump result = branch32(cond, reg, imm); |
1783 | m_makeJumpPatchable = false; |
1784 | return PatchableJump(result); |
1785 | } |
1786 | |
1787 | PatchableJump patchableBranchPtrWithPatch(RelationalCondition cond, Address left, DataLabelPtr& dataLabel, TrustedImmPtr initialRightValue = TrustedImmPtr(0)) |
1788 | { |
1789 | m_makeJumpPatchable = true; |
1790 | Jump result = branchPtrWithPatch(cond, left, dataLabel, initialRightValue); |
1791 | m_makeJumpPatchable = false; |
1792 | return PatchableJump(result); |
1793 | } |
1794 | |
1795 | PatchableJump patchableJump() |
1796 | { |
1797 | padBeforePatch(); |
1798 | m_makeJumpPatchable = true; |
1799 | Jump result = jump(); |
1800 | m_makeJumpPatchable = false; |
1801 | return PatchableJump(result); |
1802 | } |
1803 | |
1804 | ALWAYS_INLINE DataLabelPtr storePtrWithPatch(TrustedImmPtr initialValue, ImplicitAddress address) |
1805 | { |
1806 | DataLabelPtr label = moveWithPatch(initialValue, dataTempRegister); |
1807 | store32(dataTempRegister, address); |
1808 | return label; |
1809 | } |
1810 | ALWAYS_INLINE DataLabelPtr storePtrWithPatch(ImplicitAddress address) { return storePtrWithPatch(TrustedImmPtr(0), address); } |
1811 | |
    ALWAYS_INLINE Call tailRecursiveCall()
    {
        // Like a normal call, but don't link.
        // Emits the fixed-width move (target placeholder 0) + bx sequence,
        // returned as Call::Linkable so the real target can be linked in
        // later; because it is a bx (not blx), no return address is set up.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Call(m_assembler.bx(dataTempRegister), Call::Linkable);
    }
1818 | |
    // Links oldJump to fall through to a fresh tail call emitted here.
    ALWAYS_INLINE Call makeTailRecursiveCall(Jump oldJump)
    {
        oldJump.link(this);
        return tailRecursiveCall();
    }
1824 | |
1825 | |
    // Delegates to the assembler's mapping from a buffer offset to its
    // offset in the final executable copy.
    int executableOffsetFor(int location)
    {
        return m_assembler.executableOffsetFor(location);
    }
1830 | |
    // Recovers the destination of a previously-emitted call by decoding
    // the instructions at the call site.
    static FunctionPtr readCallTarget(CodeLocationCall call)
    {
        return FunctionPtr(reinterpret_cast<void(*)()>(ARMv7Assembler::readCallTarget(call.dataLocation())));
    }
1835 | |
1836 | static bool canJumpReplacePatchableBranchPtrWithPatch() { return false; } |
1837 | |
    // The patchable constant is emitted as two 4-byte Thumb-2 instructions
    // (movT3 + movt, see moveFixedWidthEncoding()), so the sequence starts
    // 8 bytes before the data label.
    static CodeLocationLabel startOfBranchPtrWithPatchOnRegister(CodeLocationDataLabelPtr label)
    {
        const unsigned twoWordOpSize = 4;
        return label.labelAtOffset(-twoWordOpSize * 2);
    }
1843 | |
    // Rewrites a jump-replaced site back to the original compare-against-
    // patchable-constant sequence, with initialValue as the constant.
    // The instruction sequence emitted differs per platform, so the revert
    // must match: on Linux/QNX a movT3/movt/cmp triple is restored; on
    // other platforms only a movT3 is restored (rd is unused there) —
    // NOTE(review): presumably mirroring how the branch was originally
    // emitted on each platform; the emission side is not visible here.
    static void revertJumpReplacementToBranchPtrWithPatch(CodeLocationLabel instructionStart, RegisterID rd, void* initialValue)
    {
#if OS(LINUX) || OS(QNX)
        ARMv7Assembler::revertJumpTo_movT3movtcmpT2(instructionStart.dataLocation(), rd, dataTempRegister, reinterpret_cast<uintptr_t>(initialValue));
#else
        UNUSED_PARAM(rd);
        ARMv7Assembler::revertJumpTo_movT3(instructionStart.dataLocation(), dataTempRegister, ARMThumbImmediate::makeUInt16(reinterpret_cast<uintptr_t>(initialValue) & 0xffff));
#endif
    }
1853 | |
    // Unreachable: canJumpReplacePatchableBranchPtrWithPatch() returns
    // false on this port, so callers must never take this path.
    static CodeLocationLabel startOfPatchableBranchPtrWithPatchOnAddress(CodeLocationDataLabelPtr)
    {
        UNREACHABLE_FOR_PLATFORM();
        return CodeLocationLabel();
    }

    // Unreachable on this port; see canJumpReplacePatchableBranchPtrWithPatch().
    static void revertJumpReplacementToPatchableBranchPtrWithPatch(CodeLocationLabel, Address, void*)
    {
        UNREACHABLE_FOR_PLATFORM();
    }
1864 | |
1865 | protected: |
    // Emits an unconditional jump as a fixed-width move of a placeholder
    // target (0) into dataTempRegister followed by bx, so the target can be
    // linked/patched later. m_makeJumpPatchable selects the fixed-size
    // jump kind for patchable call sites.
    ALWAYS_INLINE Jump jump()
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpNoConditionFixedSize : ARMv7Assembler::JumpNoCondition);
    }
1872 | |
    // Emits a conditional jump: an IT block (it(cond, true, true))
    // predicates the three following instructions — the movT3/movt pair
    // from moveFixedWidthEncoding() plus the bx — on cond.
    ALWAYS_INLINE Jump makeBranch(ARMv7Assembler::Condition cond)
    {
        m_assembler.label(); // Force nop-padding if we're in the middle of a watchpoint.
        m_assembler.it(cond, true, true);
        moveFixedWidthEncoding(TrustedImm32(0), dataTempRegister);
        return Jump(m_assembler.bx(dataTempRegister), m_makeJumpPatchable ? ARMv7Assembler::JumpConditionFixedSize : ARMv7Assembler::JumpCondition, cond);
    }
    // Convenience overloads mapping the portable condition enums onto ARMv7
    // condition codes before emitting the branch.
    ALWAYS_INLINE Jump makeBranch(RelationalCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(ResultCondition cond) { return makeBranch(armV7Condition(cond)); }
    ALWAYS_INLINE Jump makeBranch(DoubleCondition cond) { return makeBranch(armV7Condition(cond)); }
1883 | |
1884 | ArmAddress setupArmAddress(BaseIndex address) |
1885 | { |
1886 | if (address.offset) { |
1887 | ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset); |
1888 | if (imm.isValid()) |
1889 | m_assembler.add(addressTempRegister, address.base, imm); |
1890 | else { |
1891 | move(TrustedImm32(address.offset), addressTempRegister); |
1892 | m_assembler.add(addressTempRegister, addressTempRegister, address.base); |
1893 | } |
1894 | |
1895 | return ArmAddress(addressTempRegister, address.index, address.scale); |
1896 | } else |
1897 | return ArmAddress(address.base, address.index, address.scale); |
1898 | } |
1899 | |
1900 | ArmAddress setupArmAddress(Address address) |
1901 | { |
1902 | if ((address.offset >= -0xff) && (address.offset <= 0xfff)) |
1903 | return ArmAddress(address.base, address.offset); |
1904 | |
1905 | move(TrustedImm32(address.offset), addressTempRegister); |
1906 | return ArmAddress(address.base, addressTempRegister); |
1907 | } |
1908 | |
1909 | ArmAddress setupArmAddress(ImplicitAddress address) |
1910 | { |
1911 | if ((address.offset >= -0xff) && (address.offset <= 0xfff)) |
1912 | return ArmAddress(address.base, address.offset); |
1913 | |
1914 | move(TrustedImm32(address.offset), addressTempRegister); |
1915 | return ArmAddress(address.base, addressTempRegister); |
1916 | } |
1917 | |
1918 | RegisterID makeBaseIndexBase(BaseIndex address) |
1919 | { |
1920 | if (!address.offset) |
1921 | return address.base; |
1922 | |
1923 | ARMThumbImmediate imm = ARMThumbImmediate::makeUInt12OrEncodedImm(address.offset); |
1924 | if (imm.isValid()) |
1925 | m_assembler.add(addressTempRegister, address.base, imm); |
1926 | else { |
1927 | move(TrustedImm32(address.offset), addressTempRegister); |
1928 | m_assembler.add(addressTempRegister, addressTempRegister, address.base); |
1929 | } |
1930 | |
1931 | return addressTempRegister; |
1932 | } |
1933 | |
1934 | void moveFixedWidthEncoding(TrustedImm32 imm, RegisterID dst) |
1935 | { |
1936 | uint32_t value = imm.m_value; |
1937 | m_assembler.movT3(dst, ARMThumbImmediate::makeUInt16(value & 0xffff)); |
1938 | m_assembler.movt(dst, ARMThumbImmediate::makeUInt16(value >> 16)); |
1939 | } |
1940 | |
    // The portable condition enums are defined with values identical to
    // the ARMv7 condition codes, so conversion is a plain cast.
    ARMv7Assembler::Condition armV7Condition(RelationalCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(ResultCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }

    ARMv7Assembler::Condition armV7Condition(DoubleCondition cond)
    {
        return static_cast<ARMv7Assembler::Condition>(cond);
    }
1955 | |
1956 | private: |
1957 | template <typename, template <typename> class> friend class LinkBufferBase; |
1958 | |
    // Binds a recorded call site to its target function; invoked during
    // finalization by the LinkBuffer (declared a friend above).
    static void linkCall(void* code, Call call, FunctionPtr function)
    {
        ARMv7Assembler::linkCall(code, call.m_label, function.value());
    }
1963 | |
    // Redirects an already-linked call site to a new destination label.
    static void repatchCall(CodeLocationCall call, CodeLocationLabel destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }

    // Redirects an already-linked call site to a new destination function.
    static void repatchCall(CodeLocationCall call, FunctionPtr destination)
    {
        ARMv7Assembler::relinkCall(call.dataLocation(), destination.executableAddress());
    }
1973 | |
1974 | bool m_makeJumpPatchable; |
1975 | }; |
1976 | |
1977 | } // namespace JSC |
1978 | |
#endif // ENABLE(ASSEMBLER) && CPU(ARM_THUMB2)
1980 | |
1981 | #endif // MacroAssemblerARMv7_h |
1982 | |