/*
 * Copyright (C) 2009 University of Szeged
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY UNIVERSITY OF SZEGED ``AS IS'' AND ANY
 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL UNIVERSITY OF SZEGED OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
 * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "config.h"

#if ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)

#include "ARMAssembler.h"

namespace JSC {

// Patching helpers

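// Rewrites a placeholder ldr so it addresses its slot in the constant pool.
// Reading from the code below: the placeholder's low 12 bits hold the pool
// slot index stored shifted left by 1, and 'diff' is the distance in words
// from the ldr to the start of the pool. In ARM state the pc reads as the
// instruction address plus 8 (two words), which accounts for the '- 2'.
// Worked example: an ldr immediately followed by its constant
// (diff == 1, index == 0) must become "ldr reg, [pc, #-4]", so the else
// branch clears DT_UP (downward addressing) and sets the byte offset to
// sizeof(ARMWord).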
void ARMAssembler::patchConstantPoolLoad(void* loadAddr, void* constPoolAddr)
{
    ARMWord* ldr = reinterpret_cast<ARMWord*>(loadAddr);
    ARMWord diff = reinterpret_cast<ARMWord*>(constPoolAddr) - ldr;
    ARMWord index = (*ldr & 0xfff) >> 1;

    ASSERT(diff >= 1);
    if (diff >= 2 || index > 0) {
        diff = (diff + index - 2) * sizeof(ARMWord);
        ASSERT(diff <= 0xfff);
        *ldr = (*ldr & ~0xfff) | diff;
    } else
        *ldr = (*ldr & ~(0xfff | ARMAssembler::DT_UP)) | sizeof(ARMWord);
}

// Handle immediates

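// Encodes 'imm' as an ARM data-processing operand 2 immediate if possible.
// The hardware form is an 8-bit value rotated right by an even amount
// (2 * rot, with the 4-bit rot field stored in bits 11-8). This routine
// rotates the significant bits up to the top of the word and derives the
// matching rot field. Worked example: getOp2(0x3fc) returns
// OP2_IMM | 0xff | (15 << 8), since 0xff rotated right by 30 is 0x3fc.
// Returns INVALID_IMM when no such encoding exists.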
ARMWord ARMAssembler::getOp2(ARMWord imm)
{
    int rol;

    if (imm <= 0xff)
        return OP2_IMM | imm;

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol = 8;
    } else {
        imm = (imm << 24) | (imm >> 8);
        rol = 0;
    }

    if ((imm & 0xff000000) == 0) {
        imm <<= 8;
        rol += 4;
    }

    if ((imm & 0xf0000000) == 0) {
        imm <<= 4;
        rol += 2;
    }

    if ((imm & 0xc0000000) == 0) {
        imm <<= 2;
        rol += 1;
    }

    if ((imm & 0x00ffffff) == 0)
        return OP2_IMM | (imm >> 24) | (rol << 8);

    return INVALID_IMM;
}

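// Synthesizes 'imm' in two instructions when it splits into two 8-bit
// rotated immediates: mov + orr for the value itself (positive == true),
// or mvn + bic applied to its complement. For example, 0x12340000 can be
// split into 0x10000000 | 0x02340000, both of which are encodable rotated
// immediates. Returns 1 on success and 0 when the value needs a different
// strategy (a constant pool load, or movw/movt on ARMv7).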
int ARMAssembler::genInt(int reg, ARMWord imm, bool positive)
{
    // Step 1: Search for a run of zero bits and rotate the remaining
    // significant bits into a canonical position.
    ARMWord mask;
    ARMWord imm1;
    ARMWord imm2;
    int rol;

    mask = 0xff000000;
    rol = 8;
    while (1) {
        if ((imm & mask) == 0) {
            imm = (imm << rol) | (imm >> (32 - rol));
            rol = 4 + (rol >> 1);
            break;
        }
        rol += 2;
        mask >>= 2;
        if (mask & 0x3) {
            // Rotate left by 8 and retry with byte-shifted masks.
            imm = (imm << 8) | (imm >> 24);
            mask = 0xff00;
            rol = 24;
            while (1) {
                if ((imm & mask) == 0) {
                    imm = (imm << rol) | (imm >> (32 - rol));
                    rol = (rol >> 1) - 8;
                    break;
                }
                rol += 2;
                mask >>= 2;
                if (mask & 0x3)
                    return 0;
            }
            break;
        }
    }

    ASSERT((imm & 0xff) == 0);

    if ((imm & 0xff000000) == 0) {
        imm1 = OP2_IMM | ((imm >> 16) & 0xff) | (((rol + 4) & 0xf) << 8);
        imm2 = OP2_IMM | ((imm >> 8) & 0xff) | (((rol + 8) & 0xf) << 8);
    } else if (imm & 0xc0000000) {
        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xff000000) == 0) {
            imm <<= 8;
            rol += 4;
        }

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    } else {
        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        imm1 = OP2_IMM | ((imm >> 24) & 0xff) | ((rol & 0xf) << 8);
        imm <<= 8;
        rol += 4;

        if ((imm & 0xf0000000) == 0) {
            imm <<= 4;
            rol += 2;
        }

        if ((imm & 0xc0000000) == 0) {
            imm <<= 2;
            rol += 1;
        }

        if ((imm & 0x00ffffff) == 0)
            imm2 = OP2_IMM | (imm >> 24) | ((rol & 0xf) << 8);
        else
            return 0;
    }

    if (positive) {
        mov_r(reg, imm1);
        orr_r(reg, reg, imm2);
    } else {
        mvn_r(reg, imm1);
        bic_r(reg, reg, imm2);
    }

    return 1;
}

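// Returns an operand 2 encoding of 'imm', materializing it into tmpReg when
// no single-instruction form exists: first a plain rotated immediate is
// tried, then the bitwise complement (returned tagged with OP2_INV_IMM when
// the caller can invert its operation, otherwise loaded with mvn), and
// finally the multi-instruction fallback in encodeComplexImm.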
ARMWord ARMAssembler::getImm(ARMWord imm, int tmpReg, bool invert)
{
    ARMWord tmp;

    // Try to encode it with a single instruction.
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM)
        return tmp;

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        if (invert)
            return tmp | OP2_INV_IMM;
        mvn_r(tmpReg, tmp);
        return tmpReg;
    }

    return encodeComplexImm(imm, tmpReg);
}

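// Loads 'imm' into dest using the cheapest sequence available: a single
// mov, a single mvn of the complement, or the multi-instruction fallback.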
void ARMAssembler::moveImm(ARMWord imm, int dest)
{
    ARMWord tmp;

    // Try to encode it with a single instruction.
    tmp = getOp2(imm);
    if (tmp != INVALID_IMM) {
        mov_r(dest, tmp);
        return;
    }

    tmp = getOp2(~imm);
    if (tmp != INVALID_IMM) {
        mvn_r(dest, tmp);
        return;
    }

    encodeComplexImm(imm, dest);
}

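// Materializes an immediate that has no single-instruction operand 2 form.
// On ARMv7 and later, movw/movt can build any 32-bit value from two 16-bit
// halves; on earlier architectures we try a mov/orr (or mvn/bic) pair via
// genInt and otherwise fall back to a load from the constant pool.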
ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest)
{
#if WTF_ARM_ARCH_AT_LEAST(7)
    ARMWord tmp = getImm16Op2(imm);
    if (tmp != INVALID_IMM) {
        movw_r(dest, tmp);
        return dest;
    }
    movw_r(dest, getImm16Op2(imm & 0xffff));
    movt_r(dest, getImm16Op2(imm >> 16));
    return dest;
#else
    // Try to encode it with two instructions.
    if (genInt(dest, imm, true))
        return dest;
    if (genInt(dest, ~imm, false))
        return dest;

    ldr_imm(dest, imm);
    return dest;
#endif
}

// Memory load/store helpers

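// Emits a single-word load or store. LDR/STR immediate offsets are limited
// to 12 bits (0..0xfff), so larger offsets are split: for magnitudes up to
// 0xfffff, the upper bits are folded into the base with one add/sub (the
// OP2_IMM term encodes (offset >> 12) rotated into bits 19-12, i.e.
// offset & 0xff000), and anything larger goes through a register offset
// materialized by getImm.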
void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset)
{
    if (offset >= 0) {
        if (offset <= 0xfff)
            dtr_u(isLoad, srcDst, base, offset);
        else if (offset <= 0xfffff) {
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_u(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
        } else {
            ARMWord reg = getImm(offset, ARMRegisters::S0);
            dtr_ur(isLoad, srcDst, base, reg);
        }
    } else {
        offset = -offset;
        if (offset <= 0xfff)
            dtr_d(isLoad, srcDst, base, offset);
        else if (offset <= 0xfffff) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8));
            dtr_d(isLoad, srcDst, ARMRegisters::S0, offset & 0xfff);
        } else {
            ARMWord reg = getImm(offset, ARMRegisters::S0);
            dtr_dr(isLoad, srcDst, base, reg);
        }
    }
}

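// Emits a load or store from base + (index << scale) + offset. The scaled
// index is combined with the base (or with the offset, when the offset does
// not fit the 12-bit immediate range) in the scratch register S0.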
void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset)
{
    ARMWord op2;

    ASSERT(scale >= 0 && scale <= 3);
    op2 = lsl(index, scale);

    if (offset >= 0 && offset <= 0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_u(isLoad, srcDst, ARMRegisters::S0, offset);
        return;
    }
    if (offset <= 0 && offset >= -0xfff) {
        add_r(ARMRegisters::S0, base, op2);
        dtr_d(isLoad, srcDst, ARMRegisters::S0, -offset);
        return;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, op2);
    dtr_ur(isLoad, srcDst, base, ARMRegisters::S0);
}

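// Emits a VFP double load or store. FLDD/FSTD encode their offset as an
// 8-bit word count (byte offsets 0..0x3fc in steps of 4), so the fast
// paths only apply to four-byte-aligned offsets. Mid-range offsets borrow
// the add/sub trick from dataTransfer32, folding (offset >> 10) << 10 into
// the base; everything else goes through a constant pool load.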
void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset)
{
    // The fast paths encode offset >> 2, so they require a four-byte-aligned
    // offset; anything else drops through to the constant pool load below.
    if (!(offset & 0x3)) {
        if (offset <= 0x3ff && offset >= 0) {
            fdtr_u(isLoad, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
            fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;

        if (offset <= 0x3ff && offset >= 0) {
            fdtr_d(isLoad, srcDst, base, offset >> 2);
            return;
        }
        if (offset <= 0x3ffff && offset >= 0) {
            sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8));
            fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff);
            return;
        }
        offset = -offset;
    }

    ldr_un_imm(ARMRegisters::S0, offset);
    add_r(ARMRegisters::S0, ARMRegisters::S0, base);
    fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0);
}

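// Copies the generated code into executable memory and resolves recorded
// jumps. Each entry in m_jumps is a code offset whose least significant
// bit, when set, marks a constant that must stay in the constant pool. For
// the remaining entries, if the displacement fits the signed 24-bit word
// offset of a B instruction, the pc-relative load is rewritten into a
// direct branch; otherwise the pool slot is patched with the absolute
// target address.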
void* ARMAssembler::executableCopy(ExecutablePool* allocator)
{
    // 64-bit alignment is required both for the next constant pool and for
    // the JIT code itself; pad with a single bkpt if the flushed size is
    // not a multiple of 8.
    m_buffer.flushWithoutBarrier(true);
    if (m_buffer.uncheckedSize() & 0x7)
        bkpt(0);

    char* data = reinterpret_cast<char*>(m_buffer.executableCopy(allocator));

    for (Jumps::Iterator iter = m_jumps.begin(); iter != m_jumps.end(); ++iter) {
        // The least significant bit is set if the constant must stay in the
        // constant pool.
        int pos = (*iter) & (~0x1);
        ARMWord* ldrAddr = reinterpret_cast<ARMWord*>(data + pos);
        ARMWord* addr = getLdrImmAddress(ldrAddr);
        if (*addr != 0xffffffff) {
            if (!(*iter & 1)) {
                int diff = reinterpret_cast<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching);

                if (diff <= BOFFSET_MAX && diff >= BOFFSET_MIN) {
                    *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK);
                    continue;
                }
            }
            *addr = reinterpret_cast<ARMWord>(data + *addr);
        }
    }

    return data;
}

} // namespace JSC

#endif // ENABLE(ASSEMBLER) && CPU(ARM_TRADITIONAL)